Merge pull request #3613 from Gouthamve/update-tsdb

vendor: update tsdb
Goutham Veeramachaneni committed 2017-12-23 08:30:10 +05:30 (committed by GitHub)
commit 384903af2e
20 changed files with 1204 additions and 566 deletions


@ -23,10 +23,101 @@ import (
"github.com/oklog/ulid"
"github.com/pkg/errors"
"github.com/prometheus/tsdb/chunkenc"
"github.com/prometheus/tsdb/chunks"
"github.com/prometheus/tsdb/index"
"github.com/prometheus/tsdb/labels"
)
// IndexWriter serializes the index for a block of series data.
// The methods must be called in the order they are specified in.
type IndexWriter interface {
// AddSymbols registers all string symbols that are encountered in series
// and other indices.
AddSymbols(sym map[string]struct{}) error
// AddSeries populates the index writer with a series and its offsets
// of chunks that the index can reference.
// Implementations may require series to be inserted in increasing order by
// their labels.
// The reference numbers are used to resolve entries in postings lists that
// are added later.
AddSeries(ref uint64, l labels.Labels, chunks ...chunks.Meta) error
// WriteLabelIndex serializes an index from label names to values.
// The passed in values are chained tuples of strings of the length of names.
WriteLabelIndex(names []string, values []string) error
// WritePostings writes a postings list for a single label pair.
// The Postings here contain refs to the series that were added.
WritePostings(name, value string, it index.Postings) error
// Close writes any finalization and closes the resources associated with
// the underlying writer.
Close() error
}
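
The ordering contract above is easiest to see in use. A minimal sketch of one writer pass, assuming the needed imports (github.com/prometheus/tsdb/index, github.com/prometheus/tsdb/labels); writeIndex, the ref, and the sample label pair are hypothetical, not part of this change:

func writeIndex(w IndexWriter) error {
	// 1. Symbols first: every string used by later entries must be registered.
	if err := w.AddSymbols(map[string]struct{}{"__name__": {}, "up": {}}); err != nil {
		return err
	}
	// 2. Series next, in increasing label order; ref 1 is referenced below.
	if err := w.AddSeries(1, labels.Labels{{Name: "__name__", Value: "up"}}); err != nil {
		return err
	}
	// 3. Label indices, then 4. postings lists over the refs added above.
	if err := w.WriteLabelIndex([]string{"__name__"}, []string{"up"}); err != nil {
		return err
	}
	if err := w.WritePostings("__name__", "up", index.NewListPostings([]uint64{1})); err != nil {
		return err
	}
	return w.Close()
}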
// IndexReader provides reading access of serialized index data.
type IndexReader interface {
// Symbols returns a set of string symbols that may occur in series' labels
// and indices.
Symbols() (map[string]struct{}, error)
// LabelValues returns the possible label values.
LabelValues(names ...string) (index.StringTuples, error)
// Postings returns the postings list iterator for the label pair.
// The Postings here contain the offsets to the series inside the index.
// Found IDs are not strictly required to point to a valid Series, e.g. during
// background garbage collections.
Postings(name, value string) (index.Postings, error)
// SortedPostings returns a postings list that is reordered to be sorted
// by the label set of the underlying series.
SortedPostings(index.Postings) index.Postings
// Series populates the given labels and chunk metas for the series identified
// by the reference.
// Returns ErrNotFound if the ref does not resolve to a known series.
Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error
// LabelIndices returns a list of string tuples for which a label value index exists.
LabelIndices() ([][]string, error)
// Close releases the underlying resources of the reader.
Close() error
}
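
Read access composes the same way in reverse. A sketch of walking all series for one label pair, assuming fmt, github.com/prometheus/tsdb/chunks, and github.com/prometheus/tsdb/labels are imported; printSeries and the label pair are placeholders:

func printSeries(ir IndexReader) error {
	p, err := ir.Postings("__name__", "up")
	if err != nil {
		return err
	}
	p = ir.SortedPostings(p)
	var (
		lset labels.Labels
		chks []chunks.Meta
	)
	for p.Next() {
		// Series reuses the passed-in slices, so they are declared once outside.
		if err := ir.Series(p.At(), &lset, &chks); err != nil {
			return err
		}
		fmt.Println(lset, len(chks))
	}
	return p.Err()
}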
// StringTuples provides access to a sorted list of string tuples.
type StringTuples interface {
// Total number of tuples in the list.
Len() int
// At returns the tuple at position i.
At(i int) ([]string, error)
}
// ChunkWriter serializes a time block of chunked series data.
type ChunkWriter interface {
// WriteChunks writes several chunks. The Chunk field of the ChunkMetas
// must be populated.
// After returning successfully, the Ref fields in the ChunkMetas
// are set and can be used to retrieve the chunks from the written data.
WriteChunks(chunks ...chunks.Meta) error
// Close writes any required finalization and closes the resources
// associated with the underlying writer.
Close() error
}
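
Note that WriteChunks fills in the Ref fields of the metas it is handed; passing an expanded slice (rather than fresh literals) keeps those refs visible to the caller. A sketch under that contract, with a hypothetical helper name and sample data:

func writeOneChunk(w ChunkWriter) (uint64, error) {
	c := chunkenc.NewXORChunk()
	app, err := c.Appender()
	if err != nil {
		return 0, err
	}
	app.Append(1000, 4.2) // (timestamp, value)

	// Expand a slice so the writer sets Ref in our backing array.
	metas := []chunks.Meta{{Chunk: c, MinTime: 1000, MaxTime: 1000}}
	if err := w.WriteChunks(metas...); err != nil {
		return 0, err
	}
	return metas[0].Ref, nil
}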
// ChunkReader provides reading access of serialized time series data.
type ChunkReader interface {
// Chunk returns the series data chunk with the given reference.
Chunk(ref uint64) (chunkenc.Chunk, error)
// Close releases all underlying resources of the reader.
Close() error
}
// BlockReader provides reading access to a data block.
type BlockReader interface {
// Index returns an IndexReader over the block's data.
@ -91,8 +182,12 @@ type blockMeta struct {
*BlockMeta
}
const indexFilename = "index"
const metaFilename = "meta.json"
func chunkDir(dir string) string { return filepath.Join(dir, "chunks") }
func walDir(dir string) string { return filepath.Join(dir, "wal") }
func readMetaFile(dir string) (*BlockMeta, error) {
b, err := ioutil.ReadFile(filepath.Join(dir, metaFilename))
if err != nil {
@ -150,17 +245,17 @@ type Block struct {
// OpenBlock opens the block in the directory. It can be passed a chunk pool, which is used
// to instantiate chunk structs.
func OpenBlock(dir string, pool chunks.Pool) (*Block, error) {
func OpenBlock(dir string, pool chunkenc.Pool) (*Block, error) {
meta, err := readMetaFile(dir)
if err != nil {
return nil, err
}
cr, err := NewDirChunkReader(chunkDir(dir), pool)
cr, err := chunks.NewDirReader(chunkDir(dir), pool)
if err != nil {
return nil, err
}
ir, err := NewFileIndexReader(filepath.Join(dir, "index"))
ir, err := index.NewFileReader(filepath.Join(dir, "index"))
if err != nil {
return nil, err
}
@ -289,7 +384,7 @@ func (pb *Block) Delete(mint, maxt int64, ms ...labels.Matcher) error {
return ErrClosing
}
p, absent, err := PostingsForMatchers(pb.indexr, ms...)
p, err := PostingsForMatchers(pb.indexr, ms...)
if err != nil {
return errors.Wrap(err, "select series")
}
@ -300,7 +395,7 @@ func (pb *Block) Delete(mint, maxt int64, ms ...labels.Matcher) error {
stones := memTombstones{}
var lset labels.Labels
var chks []ChunkMeta
var chks []chunks.Meta
Outer:
for p.Next() {
@ -309,12 +404,6 @@ Outer:
return err
}
for _, abs := range absent {
if lset.Get(abs) != "" {
continue Outer
}
}
for _, chk := range chks {
if intervalOverlap(mint, maxt, chk.MinTime, chk.MaxTime) {
// Delete only until the current values and not beyond.
@ -411,9 +500,6 @@ func (pb *Block) Snapshot(dir string) error {
return nil
}
func chunkDir(dir string) string { return filepath.Join(dir, "chunks") }
func walDir(dir string) string { return filepath.Join(dir, "wal") }
func clampInterval(a, b, mint, maxt int64) (int64, int64) {
if a < mint {
a = mint
@ -423,36 +509,3 @@ func clampInterval(a, b, mint, maxt int64) (int64, int64) {
}
return a, b
}
type mmapFile struct {
f *os.File
b []byte
}
func openMmapFile(path string) (*mmapFile, error) {
f, err := os.Open(path)
if err != nil {
return nil, errors.Wrap(err, "try lock file")
}
info, err := f.Stat()
if err != nil {
return nil, errors.Wrap(err, "stat")
}
b, err := mmap(f, int(info.Size()))
if err != nil {
return nil, errors.Wrap(err, "mmap")
}
return &mmapFile{f: f, b: b}, nil
}
func (f *mmapFile) Close() error {
err0 := munmap(f.b)
err1 := f.f.Close()
if err0 != nil {
return err0
}
return err1
}


@ -39,7 +39,7 @@
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package chunks
package chunkenc
import "io"


@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package chunks
package chunkenc
import (
"fmt"


@ -41,7 +41,7 @@
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package chunks
package chunkenc
import (
"encoding/binary"


@ -11,18 +11,22 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
package chunks
import (
"bufio"
"encoding/binary"
"fmt"
"hash"
"hash/crc32"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"github.com/pkg/errors"
"github.com/prometheus/tsdb/chunks"
"github.com/prometheus/tsdb/chunkenc"
"github.com/prometheus/tsdb/fileutil"
)
@ -31,19 +35,19 @@ const (
MagicChunks = 0x85BD40DD
)
// ChunkMeta holds information about a chunk of data.
type ChunkMeta struct {
// Meta holds information about a chunk of data.
type Meta struct {
// Ref and Chunk hold either a reference that can be used to retrieve
// chunk data or the data itself.
// Generally, only one of them is set.
Ref uint64
Chunk chunks.Chunk
Chunk chunkenc.Chunk
MinTime, MaxTime int64 // time range the data covers
}
// writeHash writes the chunk encoding and raw data into the provided hash.
func (cm *ChunkMeta) writeHash(h hash.Hash) error {
func (cm *Meta) writeHash(h hash.Hash) error {
if _, err := h.Write([]byte{byte(cm.Chunk.Encoding())}); err != nil {
return err
}
@ -53,62 +57,27 @@ func (cm *ChunkMeta) writeHash(h hash.Hash) error {
return nil
}
// deletedIterator wraps an Iterator and makes sure any deleted metrics are not
// returned.
type deletedIterator struct {
it chunks.Iterator
var (
errInvalidSize = fmt.Errorf("invalid size")
errInvalidFlag = fmt.Errorf("invalid flag")
errInvalidChecksum = fmt.Errorf("invalid checksum")
)
intervals Intervals
var castagnoliTable *crc32.Table
func init() {
castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
}
func (it *deletedIterator) At() (int64, float64) {
return it.it.At()
// newCRC32 initializes a CRC32 hash with a preconfigured polynomial, so the
// polynomial may be easily changed in one location at a later time, if necessary.
func newCRC32() hash.Hash32 {
return crc32.New(castagnoliTable)
}
func (it *deletedIterator) Next() bool {
Outer:
for it.it.Next() {
ts, _ := it.it.At()
for _, tr := range it.intervals {
if tr.inBounds(ts) {
continue Outer
}
if ts > tr.Maxt {
it.intervals = it.intervals[1:]
continue
}
return true
}
return true
}
return false
}
func (it *deletedIterator) Err() error {
return it.it.Err()
}
// ChunkWriter serializes a time block of chunked series data.
type ChunkWriter interface {
// WriteChunks writes several chunks. The Chunk field of the ChunkMetas
// must be populated.
// After returning successfully, the Ref fields in the ChunkMetas
// are set and can be used to retrieve the chunks from the written data.
WriteChunks(chunks ...ChunkMeta) error
// Close writes any required finalization and closes the resources
// associated with the underlying writer.
Close() error
}
// chunkWriter implements the ChunkWriter interface for the standard
// Writer implements the ChunkWriter interface for the standard
// serialization format.
type chunkWriter struct {
type Writer struct {
dirFile *os.File
files []*os.File
wbuf *bufio.Writer
@ -124,7 +93,8 @@ const (
chunksFormatV1 = 1
)
func newChunkWriter(dir string) (*chunkWriter, error) {
// NewWriter returns a new writer against the given directory.
func NewWriter(dir string) (*Writer, error) {
if err := os.MkdirAll(dir, 0777); err != nil {
return nil, err
}
@ -132,7 +102,7 @@ func newChunkWriter(dir string) (*chunkWriter, error) {
if err != nil {
return nil, err
}
cw := &chunkWriter{
cw := &Writer{
dirFile: dirFile,
n: 0,
crc32: newCRC32(),
@ -141,7 +111,7 @@ func newChunkWriter(dir string) (*chunkWriter, error) {
return cw, nil
}
func (w *chunkWriter) tail() *os.File {
func (w *Writer) tail() *os.File {
if len(w.files) == 0 {
return nil
}
@ -150,7 +120,7 @@ func (w *chunkWriter) tail() *os.File {
// finalizeTail writes all pending data to the current tail file,
// truncates its size, and closes it.
func (w *chunkWriter) finalizeTail() error {
func (w *Writer) finalizeTail() error {
tf := w.tail()
if tf == nil {
return nil
@ -174,7 +144,7 @@ func (w *chunkWriter) finalizeTail() error {
return tf.Close()
}
func (w *chunkWriter) cut() error {
func (w *Writer) cut() error {
// Sync current tail to disk and close.
if err := w.finalizeTail(); err != nil {
return err
@ -216,13 +186,13 @@ func (w *chunkWriter) cut() error {
return nil
}
func (w *chunkWriter) write(b []byte) error {
func (w *Writer) write(b []byte) error {
n, err := w.wbuf.Write(b)
w.n += int64(n)
return err
}
func (w *chunkWriter) WriteChunks(chks ...ChunkMeta) error {
func (w *Writer) WriteChunks(chks ...Meta) error {
// Calculate maximum space we need and cut a new segment in case
// we don't fit into the current one.
maxLen := int64(binary.MaxVarintLen32) // The number of chunks.
@ -272,11 +242,11 @@ func (w *chunkWriter) WriteChunks(chks ...ChunkMeta) error {
return nil
}
func (w *chunkWriter) seq() int {
func (w *Writer) seq() int {
return len(w.files) - 1
}
func (w *chunkWriter) Close() error {
func (w *Writer) Close() error {
if err := w.finalizeTail(); err != nil {
return err
}
@ -285,29 +255,40 @@ func (w *chunkWriter) Close() error {
return w.dirFile.Close()
}
// ChunkReader provides reading access of serialized time series data.
type ChunkReader interface {
// Chunk returns the series data chunk with the given reference.
Chunk(ref uint64) (chunks.Chunk, error)
// Close releases all underlying resources of the reader.
Close() error
// ByteSlice abstracts a byte slice.
type ByteSlice interface {
Len() int
Range(start, end int) []byte
}
// chunkReader implements a SeriesReader for a serialized byte stream
type realByteSlice []byte
func (b realByteSlice) Len() int {
return len(b)
}
func (b realByteSlice) Range(start, end int) []byte {
return b[start:end]
}
func (b realByteSlice) Sub(start, end int) ByteSlice {
return b[start:end]
}
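
Any type with Len and Range satisfies ByteSlice, so callers can hand NewReader in-memory data as well as mmap'd files. A sketch, with the caveat that the 8-byte segment header layout (4-byte magic, version byte, padding) is assumed from the writer code rather than stated here:

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/prometheus/tsdb/chunks"
)

// memSlice is a caller-side ByteSlice over a plain byte slice.
type memSlice []byte

func (b memSlice) Len() int                    { return len(b) }
func (b memSlice) Range(start, end int) []byte { return b[start:end] }

func main() {
	// A minimal, empty segment: magic number plus format version.
	hdr := make([]byte, 8)
	binary.BigEndian.PutUint32(hdr, chunks.MagicChunks)
	hdr[4] = 1 // chunksFormatV1
	r, err := chunks.NewReader([]chunks.ByteSlice{memSlice(hdr)}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(r.Close())
}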
// Reader implements a SeriesReader for a serialized byte stream
// of series data.
type chunkReader struct {
type Reader struct {
// The underlying bytes holding the encoded series data.
bs []ByteSlice
// Closers for resources behind the byte slices.
cs []io.Closer
pool chunks.Pool
pool chunkenc.Pool
}
func newChunkReader(bs []ByteSlice, cs []io.Closer, pool chunks.Pool) (*chunkReader, error) {
cr := chunkReader{pool: pool, bs: bs, cs: cs}
func newReader(bs []ByteSlice, cs []io.Closer, pool chunkenc.Pool) (*Reader, error) {
cr := Reader{pool: pool, bs: bs, cs: cs}
for i, b := range cr.bs {
if b.Len() < 4 {
@ -321,44 +302,44 @@ func newChunkReader(bs []ByteSlice, cs []io.Closer, pool chunks.Pool) (*chunkRea
return &cr, nil
}
// NewChunkReader returns a new chunk reader against the given byte slices.
func NewChunkReader(bs []ByteSlice, pool chunks.Pool) (ChunkReader, error) {
// NewReader returns a new chunk reader against the given byte slices.
func NewReader(bs []ByteSlice, pool chunkenc.Pool) (*Reader, error) {
if pool == nil {
pool = chunks.NewPool()
pool = chunkenc.NewPool()
}
return newChunkReader(bs, nil, pool)
return newReader(bs, nil, pool)
}
// NewDirChunkReader returns a new ChunkReader against sequentially numbered files in the
// NewDirReader returns a new Reader against sequentially numbered files in the
// given directory.
func NewDirChunkReader(dir string, pool chunks.Pool) (ChunkReader, error) {
func NewDirReader(dir string, pool chunkenc.Pool) (*Reader, error) {
files, err := sequenceFiles(dir)
if err != nil {
return nil, err
}
if pool == nil {
pool = chunks.NewPool()
pool = chunkenc.NewPool()
}
var bs []ByteSlice
var cs []io.Closer
for _, fn := range files {
f, err := openMmapFile(fn)
f, err := fileutil.OpenMmapFile(fn)
if err != nil {
return nil, errors.Wrapf(err, "mmap files")
}
cs = append(cs, f)
bs = append(bs, realByteSlice(f.b))
bs = append(bs, realByteSlice(f.Bytes()))
}
return newChunkReader(bs, cs, pool)
return newReader(bs, cs, pool)
}
func (s *chunkReader) Close() error {
func (s *Reader) Close() error {
return closeAll(s.cs...)
}
func (s *chunkReader) Chunk(ref uint64) (chunks.Chunk, error) {
func (s *Reader) Chunk(ref uint64) (chunkenc.Chunk, error) {
var (
seq = int(ref >> 32)
off = int((ref << 32) >> 32)
@ -381,5 +362,47 @@ func (s *chunkReader) Chunk(ref uint64) (chunks.Chunk, error) {
}
r = b.Range(off+n, off+n+int(l))
return s.pool.Get(chunks.Encoding(r[0]), r[1:1+l])
return s.pool.Get(chunkenc.Encoding(r[0]), r[1:1+l])
}
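
The seq/off split above is the chunk reference format: the upper 32 bits carry the segment sequence number, the lower 32 bits the byte offset within that segment. The same arithmetic as standalone helpers (names are illustrative):

func packChunkRef(seq, off uint32) uint64 { return uint64(seq)<<32 | uint64(off) }

func unpackChunkRef(ref uint64) (seq, off int) {
	return int(ref >> 32), int((ref << 32) >> 32)
}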
func nextSequenceFile(dir string) (string, int, error) {
names, err := fileutil.ReadDir(dir)
if err != nil {
return "", 0, err
}
i := uint64(0)
for _, n := range names {
j, err := strconv.ParseUint(n, 10, 64)
if err != nil {
continue
}
i = j
}
return filepath.Join(dir, fmt.Sprintf("%0.6d", i+1)), int(i + 1), nil
}
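
The %0.6d verb zero-pads the sequence number to six digits, which is what makes segment names sort lexically. A quick demonstration:

package main

import "fmt"

func main() {
	fmt.Printf("%0.6d\n", 1)  // 000001
	fmt.Printf("%0.6d\n", 42) // 000042
}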
func sequenceFiles(dir string) ([]string, error) {
files, err := ioutil.ReadDir(dir)
if err != nil {
return nil, err
}
var res []string
for _, fi := range files {
if _, err := strconv.ParseUint(fi.Name(), 10, 64); err != nil {
continue
}
res = append(res, filepath.Join(dir, fi.Name()))
}
return res, nil
}
func closeAll(cs ...io.Closer) (err error) {
for _, c := range cs {
if e := c.Close(); e != nil {
err = e
}
}
return err
}


@ -26,8 +26,10 @@ import (
"github.com/oklog/ulid"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/tsdb/chunkenc"
"github.com/prometheus/tsdb/chunks"
"github.com/prometheus/tsdb/fileutil"
"github.com/prometheus/tsdb/index"
"github.com/prometheus/tsdb/labels"
)
@ -56,7 +58,7 @@ type Compactor interface {
// Compact runs compaction against the provided directories. Must
// only be called concurrently with results of Plan().
Compact(dest string, dirs ...string) error
Compact(dest string, dirs ...string) (ulid.ULID, error)
}
// LeveledCompactor implements the Compactor interface.
@ -65,7 +67,7 @@ type LeveledCompactor struct {
metrics *compactorMetrics
logger log.Logger
ranges []int64
chunkPool chunks.Pool
chunkPool chunkenc.Pool
}
type compactorMetrics struct {
@ -123,12 +125,12 @@ func newCompactorMetrics(r prometheus.Registerer) *compactorMetrics {
}
// NewLeveledCompactor returns a LeveledCompactor.
func NewLeveledCompactor(r prometheus.Registerer, l log.Logger, ranges []int64, pool chunks.Pool) (*LeveledCompactor, error) {
func NewLeveledCompactor(r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool) (*LeveledCompactor, error) {
if len(ranges) == 0 {
return nil, errors.Errorf("at least one range must be provided")
}
if pool == nil {
pool = chunks.NewPool()
pool = chunkenc.NewPool()
}
return &LeveledCompactor{
ranges: ranges,
@ -303,7 +305,7 @@ func compactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta {
// Compact creates a new block in the compactor's directory from the blocks in the
// provided directories.
func (c *LeveledCompactor) Compact(dest string, dirs ...string) (err error) {
func (c *LeveledCompactor) Compact(dest string, dirs ...string) (uid ulid.ULID, err error) {
var blocks []BlockReader
var bs []*Block
var metas []*BlockMeta
@ -311,13 +313,13 @@ func (c *LeveledCompactor) Compact(dest string, dirs ...string) (err error) {
for _, d := range dirs {
b, err := OpenBlock(d, c.chunkPool)
if err != nil {
return err
return uid, err
}
defer b.Close()
meta, err := readMetaFile(d)
if err != nil {
return err
return uid, err
}
metas = append(metas, meta)
@ -326,11 +328,11 @@ func (c *LeveledCompactor) Compact(dest string, dirs ...string) (err error) {
}
entropy := rand.New(rand.NewSource(time.Now().UnixNano()))
uid := ulid.MustNew(ulid.Now(), entropy)
uid = ulid.MustNew(ulid.Now(), entropy)
err = c.write(dest, compactBlockMetas(uid, metas...), blocks...)
if err == nil {
return nil
return uid, nil
}
var merr MultiError
@ -342,7 +344,7 @@ func (c *LeveledCompactor) Compact(dest string, dirs ...string) (err error) {
}
}
return merr
return uid, merr
}
func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64) (ulid.ULID, error) {
@ -370,7 +372,7 @@ type instrumentedChunkWriter struct {
trange prometheus.Histogram
}
func (w *instrumentedChunkWriter) WriteChunks(chunks ...ChunkMeta) error {
func (w *instrumentedChunkWriter) WriteChunks(chunks ...chunks.Meta) error {
for _, c := range chunks {
w.size.Observe(float64(len(c.Chunk.Bytes())))
w.samples.Observe(float64(c.Chunk.NumSamples()))
@ -411,7 +413,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe
// data of all blocks.
var chunkw ChunkWriter
chunkw, err = newChunkWriter(chunkDir(tmp))
chunkw, err = chunks.NewWriter(chunkDir(tmp))
if err != nil {
return errors.Wrap(err, "open chunk writer")
}
@ -425,7 +427,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe
}
}
indexw, err := newIndexWriter(tmp)
indexw, err := index.NewWriter(filepath.Join(tmp, indexFilename))
if err != nil {
return errors.Wrap(err, "open index writer")
}
@ -514,7 +516,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta,
allSymbols[s] = struct{}{}
}
all, err := indexr.Postings(allPostingsKey.Name, allPostingsKey.Value)
all, err := indexr.Postings(index.AllPostingsKey())
if err != nil {
return err
}
@ -534,7 +536,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta,
// We fully rebuild the postings list index from merged series.
var (
postings = newMemPostings()
postings = index.NewMemPostings()
values = map[string]stringset{}
i = uint64(0)
)
@ -558,7 +560,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta,
continue
}
newChunk := chunks.NewXORChunk()
newChunk := chunkenc.NewXORChunk()
app, err := newChunk.Appender()
if err != nil {
return err
@ -599,7 +601,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta,
}
valset.set(l.Value)
}
postings.add(i, lset)
postings.Add(i, lset)
i++
}
@ -619,8 +621,8 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta,
}
}
for _, l := range postings.sortedKeys() {
if err := indexw.WritePostings(l.Name, l.Value, postings.get(l.Name, l.Value)); err != nil {
for _, l := range postings.SortedKeys() {
if err := indexw.WritePostings(l.Name, l.Value, postings.Get(l.Name, l.Value)); err != nil {
return errors.Wrap(err, "write postings")
}
}
@ -628,18 +630,18 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta,
}
type compactionSeriesSet struct {
p Postings
p index.Postings
index IndexReader
chunks ChunkReader
tombstones TombstoneReader
l labels.Labels
c []ChunkMeta
c []chunks.Meta
intervals Intervals
err error
}
func newCompactionSeriesSet(i IndexReader, c ChunkReader, t TombstoneReader, p Postings) *compactionSeriesSet {
func newCompactionSeriesSet(i IndexReader, c ChunkReader, t TombstoneReader, p index.Postings) *compactionSeriesSet {
return &compactionSeriesSet{
index: i,
chunks: c,
@ -667,7 +669,7 @@ func (c *compactionSeriesSet) Next() bool {
// Remove completely deleted chunks.
if len(c.intervals) > 0 {
chks := make([]ChunkMeta, 0, len(c.c))
chks := make([]chunks.Meta, 0, len(c.c))
for _, chk := range c.c {
if !(Interval{chk.MinTime, chk.MaxTime}.isSubrange(c.intervals)) {
chks = append(chks, chk)
@ -697,7 +699,7 @@ func (c *compactionSeriesSet) Err() error {
return c.p.Err()
}
func (c *compactionSeriesSet) At() (labels.Labels, []ChunkMeta, Intervals) {
func (c *compactionSeriesSet) At() (labels.Labels, []chunks.Meta, Intervals) {
return c.l, c.c, c.intervals
}
@ -706,13 +708,13 @@ type compactionMerger struct {
aok, bok bool
l labels.Labels
c []ChunkMeta
c []chunks.Meta
intervals Intervals
}
type compactionSeries struct {
labels labels.Labels
chunks []*ChunkMeta
chunks []*chunks.Meta
}
func newCompactionMerger(a, b ChunkSeriesSet) (*compactionMerger, error) {
@ -747,7 +749,7 @@ func (c *compactionMerger) Next() bool {
// While advancing child iterators the memory used for labels and chunks
// may be reused. When picking a series we have to store the result.
var lset labels.Labels
var chks []ChunkMeta
var chks []chunks.Meta
d := c.compare()
// Both sets contain the current series. Chain them into a single one.
@ -788,7 +790,7 @@ func (c *compactionMerger) Err() error {
return c.b.Err()
}
func (c *compactionMerger) At() (labels.Labels, []ChunkMeta, Intervals) {
func (c *compactionMerger) At() (labels.Labels, []chunks.Meta, Intervals) {
return c.l, c.c, c.intervals
}


@ -36,7 +36,7 @@ import (
"github.com/oklog/ulid"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/tsdb/chunks"
"github.com/prometheus/tsdb/chunkenc"
"github.com/prometheus/tsdb/fileutil"
"github.com/prometheus/tsdb/labels"
)
@ -99,7 +99,7 @@ type DB struct {
logger log.Logger
metrics *dbMetrics
opts *Options
chunkPool chunks.Pool
chunkPool chunkenc.Pool
compactor Compactor
// Mutex for that must be held when modifying the general block layout.
@ -185,7 +185,7 @@ func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options) (db
donec: make(chan struct{}),
stopc: make(chan struct{}),
compactionsEnabled: true,
chunkPool: chunks.NewPool(),
chunkPool: chunkenc.NewPool(),
}
db.metrics = newDBMetrics(db, r)
@ -386,7 +386,7 @@ func (db *DB) compact() (changes bool, err error) {
default:
}
if err := db.compactor.Compact(db.dir, plan...); err != nil {
if _, err := db.compactor.Compact(db.dir, plan...); err != nil {
return changes, errors.Wrapf(err, "compact %s", plan)
}
changes = true


@ -5,8 +5,12 @@ import (
"hash"
"hash/crc32"
"unsafe"
"github.com/pkg/errors"
)
var errInvalidSize = errors.New("invalid size")
// encbuf is a helper type to populate a byte slice with various types.
type encbuf struct {
b []byte

vendor/github.com/prometheus/tsdb/fileutil/mmap.go (new file, 48 lines)

@ -0,0 +1,48 @@
package fileutil
import (
"os"
"github.com/pkg/errors"
)
type MmapFile struct {
f *os.File
b []byte
}
func OpenMmapFile(path string) (*MmapFile, error) {
f, err := os.Open(path)
if err != nil {
return nil, errors.Wrap(err, "try lock file")
}
info, err := f.Stat()
if err != nil {
return nil, errors.Wrap(err, "stat")
}
b, err := mmap(f, int(info.Size()))
if err != nil {
return nil, errors.Wrap(err, "mmap")
}
return &MmapFile{f: f, b: b}, nil
}
func (f *MmapFile) Close() error {
err0 := munmap(f.b)
err1 := f.f.Close()
if err0 != nil {
return err0
}
return err1
}
func (f *MmapFile) File() *os.File {
return f.f
}
func (f *MmapFile) Bytes() []byte {
return f.b
}


@ -13,7 +13,7 @@
// +build !windows,!plan9
package tsdb
package fileutil
import (
"os"


@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
package fileutil
import (
"os"


@ -17,6 +17,7 @@ import (
"math"
"runtime"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
@ -25,7 +26,9 @@ import (
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/tsdb/chunkenc"
"github.com/prometheus/tsdb/chunks"
"github.com/prometheus/tsdb/index"
"github.com/prometheus/tsdb/labels"
)
@ -64,7 +67,7 @@ type Head struct {
symbols map[string]struct{}
values map[string]stringset // label names to possible values
postings *memPostings // postings lists for terms
postings *index.MemPostings // postings lists for terms
tombstones memTombstones
}
@ -185,7 +188,7 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal WAL, chunkRange int64) (
series: newStripeSeries(),
values: map[string]stringset{},
symbols: map[string]struct{}{},
postings: newUnorderedMemPostings(),
postings: index.NewUnorderedMemPostings(),
tombstones: memTombstones{},
}
h.metrics = newHeadMetrics(h, r)
@ -226,7 +229,7 @@ func (h *Head) processWALSamples(
// ReadWAL initializes the head by consuming the write ahead log.
func (h *Head) ReadWAL() error {
defer h.postings.ensureOrder()
defer h.postings.EnsureOrder()
r := h.wal.Reader()
mint := h.MinTime()
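
The unordered postings plus deferred EnsureOrder is the WAL-replay pattern: appends land in whatever order the log holds them, and every list is sorted once at the end. A self-contained sketch with made-up series IDs and labels:

package main

import (
	"fmt"

	"github.com/prometheus/tsdb/index"
	"github.com/prometheus/tsdb/labels"
)

func main() {
	p := index.NewUnorderedMemPostings()
	p.Add(2, labels.Labels{{Name: "job", Value: "api"}}) // log order, not sorted
	p.Add(1, labels.Labels{{Name: "job", Value: "api"}})
	p.EnsureOrder() // sort every postings list once replay is done

	it := p.Get("job", "api")
	for it.Next() {
		fmt.Println(it.At()) // 1, then 2
	}
}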
@ -574,23 +577,16 @@ func (h *Head) Delete(mint, maxt int64, ms ...labels.Matcher) error {
ir := h.indexRange(mint, maxt)
p, absent, err := PostingsForMatchers(ir, ms...)
p, err := PostingsForMatchers(ir, ms...)
if err != nil {
return errors.Wrap(err, "select series")
}
var stones []Stone
Outer:
for p.Next() {
series := h.series.getByID(p.At())
for _, abs := range absent {
if series.lset.Get(abs) != "" {
continue Outer
}
}
// Delete only until the current values and not beyond.
t0, t1 := clampInterval(mint, maxt, series.minTime(), series.maxTime())
stones = append(stones, Stone{p.At(), Intervals{{t0, t1}}})
@ -623,64 +619,14 @@ func (h *Head) gc() {
h.metrics.chunksRemoved.Add(float64(chunksRemoved))
h.metrics.chunks.Sub(float64(chunksRemoved))
// Remove deleted series IDs from the postings lists. First do a collection
// run where we rebuild all postings that have something to delete
h.postings.mtx.RLock()
type replEntry struct {
idx int
l []uint64
}
collected := map[labels.Label]replEntry{}
for t, p := range h.postings.m {
repl := replEntry{idx: len(p)}
for i, id := range p {
if _, ok := deleted[id]; ok {
// First ID that got deleted, initialize replacement with
// all remaining IDs so far.
if repl.l == nil {
repl.l = make([]uint64, 0, len(p))
repl.l = append(repl.l, p[:i]...)
}
continue
}
// Only add to the replacement once we know we have to do it.
if repl.l != nil {
repl.l = append(repl.l, id)
}
}
if repl.l != nil {
collected[t] = repl
}
}
h.postings.mtx.RUnlock()
// Replace all postings that have changed. Append all IDs that may have
// been added while we switched locks.
h.postings.mtx.Lock()
for t, repl := range collected {
l := append(repl.l, h.postings.m[t][repl.idx:]...)
if len(l) > 0 {
h.postings.m[t] = l
} else {
delete(h.postings.m, t)
}
}
h.postings.mtx.Unlock()
// Remove deleted series IDs from the postings lists.
h.postings.Delete(deleted)
// Rebuild symbols and label value indices from what is left in the postings terms.
h.postings.mtx.RLock()
symbols := make(map[string]struct{})
values := make(map[string]stringset, len(h.values))
for t := range h.postings.m {
h.postings.Iter(func(t labels.Label, _ index.Postings) error {
symbols[t.Name] = struct{}{}
symbols[t.Value] = struct{}{}
@ -690,9 +636,8 @@ func (h *Head) gc() {
values[t.Name] = ss
}
ss.set(t.Value)
}
h.postings.mtx.RUnlock()
return nil
})
h.symMtx.Lock()
@ -772,13 +717,24 @@ func unpackChunkID(id uint64) (seriesID, chunkID uint64) {
}
// Chunk returns the chunk for the reference number.
func (h *headChunkReader) Chunk(ref uint64) (chunks.Chunk, error) {
func (h *headChunkReader) Chunk(ref uint64) (chunkenc.Chunk, error) {
sid, cid := unpackChunkID(ref)
s := h.head.series.getByID(sid)
// This means that the series has been garbage collected.
if s == nil {
return nil, ErrNotFound
}
s.Lock()
c := s.chunk(int(cid))
// This means that the chunk has been garbage collected.
if c == nil {
s.Unlock()
return nil, ErrNotFound
}
mint, maxt := c.minTime, c.maxTime
s.Unlock()
@ -794,12 +750,12 @@ func (h *headChunkReader) Chunk(ref uint64) (chunks.Chunk, error) {
}
type safeChunk struct {
chunks.Chunk
chunkenc.Chunk
s *memSeries
cid int
}
func (c *safeChunk) Iterator() chunks.Iterator {
func (c *safeChunk) Iterator() chunkenc.Iterator {
c.s.Lock()
it := c.s.iterator(c.cid)
c.s.Unlock()
@ -832,7 +788,7 @@ func (h *headIndexReader) Symbols() (map[string]struct{}, error) {
}
// LabelValues returns the possible label values
func (h *headIndexReader) LabelValues(names ...string) (StringTuples, error) {
func (h *headIndexReader) LabelValues(names ...string) (index.StringTuples, error) {
if len(names) != 1 {
return nil, errInvalidSize
}
@ -846,22 +802,22 @@ func (h *headIndexReader) LabelValues(names ...string) (StringTuples, error) {
}
sort.Strings(sl)
return &stringTuples{l: len(names), s: sl}, nil
return index.NewStringTuples(sl, len(names))
}
// Postings returns the postings list iterator for the label pair.
func (h *headIndexReader) Postings(name, value string) (Postings, error) {
return h.head.postings.get(name, value), nil
func (h *headIndexReader) Postings(name, value string) (index.Postings, error) {
return h.head.postings.Get(name, value), nil
}
func (h *headIndexReader) SortedPostings(p Postings) Postings {
func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings {
ep := make([]uint64, 0, 128)
for p.Next() {
ep = append(ep, p.At())
}
if err := p.Err(); err != nil {
return errPostings{err: errors.Wrap(err, "expand postings")}
return index.ErrPostings(errors.Wrap(err, "expand postings"))
}
sort.Slice(ep, func(i, j int) bool {
@ -874,11 +830,11 @@ func (h *headIndexReader) SortedPostings(p Postings) Postings {
}
return labels.Compare(a.lset, b.lset) < 0
})
return newListPostings(ep)
return index.NewListPostings(ep)
}
// Series returns the series for the given reference.
func (h *headIndexReader) Series(ref uint64, lbls *labels.Labels, chks *[]ChunkMeta) error {
func (h *headIndexReader) Series(ref uint64, lbls *labels.Labels, chks *[]chunks.Meta) error {
s := h.head.series.getByID(ref)
if s == nil {
@ -897,7 +853,7 @@ func (h *headIndexReader) Series(ref uint64, lbls *labels.Labels, chks *[]ChunkM
if !intervalOverlap(c.minTime, c.maxTime, h.mint, h.maxt) {
continue
}
*chks = append(*chks, ChunkMeta{
*chks = append(*chks, chunks.Meta{
MinTime: c.minTime,
MaxTime: c.maxTime,
Ref: packChunkID(s.ref, uint64(s.chunkID(i))),
@ -945,7 +901,7 @@ func (h *Head) getOrCreateWithID(id, hash uint64, lset labels.Labels) (*memSerie
h.metrics.series.Inc()
h.metrics.seriesCreated.Inc()
h.postings.add(id, lset)
h.postings.Add(id, lset)
h.symMtx.Lock()
defer h.symMtx.Unlock()
@ -1150,7 +1106,7 @@ type memSeries struct {
lastValue float64
sampleBuf [4]sample
app chunks.Appender // Current appender for the chunk.
app chunkenc.Appender // Current appender for the chunk.
}
func (s *memSeries) minTime() int64 {
@ -1163,7 +1119,7 @@ func (s *memSeries) maxTime() int64 {
func (s *memSeries) cut(mint int64) *memChunk {
c := &memChunk{
chunk: chunks.NewXORChunk(),
chunk: chunkenc.NewXORChunk(),
minTime: mint,
maxTime: math.MinInt64,
}
@ -1291,13 +1247,13 @@ func computeChunkEndTime(start, cur, max int64) int64 {
return start + (max-start)/a
}
func (s *memSeries) iterator(id int) chunks.Iterator {
func (s *memSeries) iterator(id int) chunkenc.Iterator {
c := s.chunk(id)
// TODO(fabxc): Work around! A querier may have retrieved a pointer to a series' chunk,
// which got then garbage collected before it got accessed.
// We must ensure to not garbage collect as long as any readers still hold a reference.
if c == nil {
return chunks.NewNopIterator()
return chunkenc.NewNopIterator()
}
if id-s.firstChunkID < len(s.chunks)-1 {
@ -1322,12 +1278,12 @@ func (s *memSeries) head() *memChunk {
}
type memChunk struct {
chunk chunks.Chunk
chunk chunkenc.Chunk
minTime, maxTime int64
}
type memSafeIterator struct {
chunks.Iterator
chunkenc.Iterator
i int
total int
@ -1352,3 +1308,27 @@ func (it *memSafeIterator) At() (int64, float64) {
s := it.buf[4-(it.total-it.i)]
return s.t, s.v
}
type stringset map[string]struct{}
func (ss stringset) set(s string) {
ss[s] = struct{}{}
}
func (ss stringset) has(s string) bool {
_, ok := ss[s]
return ok
}
func (ss stringset) String() string {
return strings.Join(ss.slice(), ",")
}
func (ss stringset) slice() []string {
slice := make([]string, 0, len(ss))
for k := range ss {
slice = append(slice, k)
}
sort.Strings(slice)
return slice
}


@ -0,0 +1,179 @@
package index
import (
"encoding/binary"
"hash"
"hash/crc32"
"unsafe"
)
// encbuf is a helper type to populate a byte slice with various types.
type encbuf struct {
b []byte
c [binary.MaxVarintLen64]byte
}
func (e *encbuf) reset() { e.b = e.b[:0] }
func (e *encbuf) get() []byte { return e.b }
func (e *encbuf) len() int { return len(e.b) }
func (e *encbuf) putString(s string) { e.b = append(e.b, s...) }
func (e *encbuf) putBytes(b []byte) { e.b = append(e.b, b...) }
func (e *encbuf) putByte(c byte) { e.b = append(e.b, c) }
func (e *encbuf) putBE32int(x int) { e.putBE32(uint32(x)) }
func (e *encbuf) putBE64int(x int) { e.putBE64(uint64(x)) }
func (e *encbuf) putBE64int64(x int64) { e.putBE64(uint64(x)) }
func (e *encbuf) putUvarint32(x uint32) { e.putUvarint64(uint64(x)) }
func (e *encbuf) putUvarint(x int) { e.putUvarint64(uint64(x)) }
func (e *encbuf) putBE32(x uint32) {
binary.BigEndian.PutUint32(e.c[:], x)
e.b = append(e.b, e.c[:4]...)
}
func (e *encbuf) putBE64(x uint64) {
binary.BigEndian.PutUint64(e.c[:], x)
e.b = append(e.b, e.c[:8]...)
}
func (e *encbuf) putUvarint64(x uint64) {
n := binary.PutUvarint(e.c[:], x)
e.b = append(e.b, e.c[:n]...)
}
func (e *encbuf) putVarint64(x int64) {
n := binary.PutVarint(e.c[:], x)
e.b = append(e.b, e.c[:n]...)
}
// putUvarintStr writes a string to the buffer prefixed by its varint length (in bytes!).
func (e *encbuf) putUvarintStr(s string) {
b := *(*[]byte)(unsafe.Pointer(&s))
e.putUvarint(len(b))
e.putString(s)
}
// putHash appends a hash over the buffer's current contents to the buffer.
func (e *encbuf) putHash(h hash.Hash) {
h.Reset()
_, err := h.Write(e.b)
if err != nil {
panic(err) // The CRC32 implementation does not error
}
e.b = h.Sum(e.b)
}
// decbuf provides safe methods to extract data from a byte slice. It does all
// necessary bounds checking and advancing of the byte slice.
// Several datums can be extracted without checking for errors. However, before using
// any datum, the err() method must be checked.
type decbuf struct {
b []byte
e error
}
func (d *decbuf) uvarint() int { return int(d.uvarint64()) }
func (d *decbuf) uvarint32() uint32 { return uint32(d.uvarint64()) }
func (d *decbuf) be32int() int { return int(d.be32()) }
func (d *decbuf) be64int64() int64 { return int64(d.be64()) }
// crc32 returns a CRC32 checksum over the remaining bytes.
func (d *decbuf) crc32() uint32 {
return crc32.Checksum(d.b, castagnoliTable)
}
func (d *decbuf) uvarintStr() string {
l := d.uvarint64()
if d.e != nil {
return ""
}
if len(d.b) < int(l) {
d.e = errInvalidSize
return ""
}
s := string(d.b[:l])
d.b = d.b[l:]
return s
}
func (d *decbuf) varint64() int64 {
if d.e != nil {
return 0
}
x, n := binary.Varint(d.b)
if n < 1 {
d.e = errInvalidSize
return 0
}
d.b = d.b[n:]
return x
}
func (d *decbuf) uvarint64() uint64 {
if d.e != nil {
return 0
}
x, n := binary.Uvarint(d.b)
if n < 1 {
d.e = errInvalidSize
return 0
}
d.b = d.b[n:]
return x
}
func (d *decbuf) be64() uint64 {
if d.e != nil {
return 0
}
if len(d.b) < 8 {
d.e = errInvalidSize
return 0
}
x := binary.BigEndian.Uint64(d.b)
d.b = d.b[8:]
return x
}
func (d *decbuf) be32() uint32 {
if d.e != nil {
return 0
}
if len(d.b) < 4 {
d.e = errInvalidSize
return 0
}
x := binary.BigEndian.Uint32(d.b)
d.b = d.b[4:]
return x
}
func (d *decbuf) byte() byte {
if d.e != nil {
return 0
}
if len(d.b) < 1 {
d.e = errInvalidSize
return 0
}
x := d.b[0]
d.b = d.b[1:]
return x
}
func (d *decbuf) decbuf(l int) decbuf {
if d.e != nil {
return decbuf{e: d.e}
}
if l > len(d.b) {
return decbuf{e: errInvalidSize}
}
r := decbuf{b: d.b[:l]}
d.b = d.b[l:]
return r
}
func (d *decbuf) err() error { return d.e }
func (d *decbuf) len() int { return len(d.b) }
func (d *decbuf) get() []byte { return d.b }
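
encbuf and decbuf are symmetric, and decbuf defers error handling so several datums can be read before a single err() check. Both types are unexported, so this round-trip sketch assumes it sits inside package index:

func roundTripSketch() error {
	var e encbuf
	e.putUvarint64(12345)
	e.putUvarintStr("up")
	e.putBE32(7)

	d := decbuf{b: e.get()}
	_ = d.uvarint64()  // 12345
	_ = d.uvarintStr() // "up"
	_ = d.be32()       // 7
	return d.err()     // one error check after all the reads
}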


@ -11,13 +11,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
package index
import (
"bufio"
"encoding/binary"
"fmt"
"hash"
"hash/crc32"
"io"
"math"
"os"
@ -26,6 +27,7 @@ import (
"strings"
"github.com/pkg/errors"
"github.com/prometheus/tsdb/chunks"
"github.com/prometheus/tsdb/fileutil"
"github.com/prometheus/tsdb/labels"
)
@ -35,18 +37,12 @@ const (
MagicIndex = 0xBAAAD700
indexFormatV1 = 1
size_unit = 4
)
const indexFilename = "index"
const compactionPageBytes = minSectorSize * 64
type indexWriterSeries struct {
labels labels.Labels
chunks []ChunkMeta // series file offset of chunks
offset uint32 // index file offset of series reference
chunks []chunks.Meta // series file offset of chunks
offset uint32 // index file offset of series reference
}
type indexWriterSeriesSlice []*indexWriterSeries
@ -87,37 +83,24 @@ func (s indexWriterStage) String() string {
return "<unknown>"
}
// IndexWriter serializes the index for a block of series data.
// The methods must be called in the order they are specified in.
type IndexWriter interface {
// AddSymbols registers all string symbols that are encountered in series
// and other indices.
AddSymbols(sym map[string]struct{}) error
// The table gets initialized with sync.Once but may still cause a race
// with any other use of the crc32 package anywhere. Thus we initialize it
// before.
var castagnoliTable *crc32.Table
// AddSeries populates the index writer with a series and its offsets
// of chunks that the index can reference.
// Implementations may require series to be inserted in increasing order by
// their labels.
// The reference numbers are used to resolve entries in postings lists that
// are added later.
AddSeries(ref uint64, l labels.Labels, chunks ...ChunkMeta) error
func init() {
castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
}
// WriteLabelIndex serializes an index from label names to values.
// The passed in values are chained tuples of strings of the length of names.
WriteLabelIndex(names []string, values []string) error
// WritePostings writes a postings list for a single label pair.
// The Postings here contain refs to the series that were added.
WritePostings(name, value string, it Postings) error
// Close writes any finalization and closes the resources associated with
// the underlying writer.
Close() error
// newCRC32 initializes a CRC32 hash with a preconfigured polynomial, so the
// polynomial may be easily changed in one location at a later time, if necessary.
func newCRC32() hash.Hash32 {
return crc32.New(castagnoliTable)
}
// indexWriter implements the IndexWriter interface for the standard
// serialization format.
type indexWriter struct {
type Writer struct {
f *os.File
fbuf *bufio.Writer
pos uint64
@ -150,14 +133,17 @@ type indexTOC struct {
postingsTable uint64
}
func newIndexWriter(dir string) (*indexWriter, error) {
// NewWriter returns a new Writer to the given filename.
func NewWriter(fn string) (*Writer, error) {
dir := filepath.Dir(fn)
df, err := fileutil.OpenDir(dir)
if err != nil {
return nil, err
}
defer df.Close() // close for the Windows platform
f, err := os.OpenFile(filepath.Join(dir, indexFilename), os.O_CREATE|os.O_WRONLY, 0666)
f, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY, 0666)
if err != nil {
return nil, err
}
@ -165,7 +151,7 @@ func newIndexWriter(dir string) (*indexWriter, error) {
return nil, errors.Wrap(err, "sync dir")
}
iw := &indexWriter{
iw := &Writer{
f: f,
fbuf: bufio.NewWriterSize(f, 1<<22),
pos: 0,
@ -187,7 +173,7 @@ func newIndexWriter(dir string) (*indexWriter, error) {
return iw, nil
}
func (w *indexWriter) write(bufs ...[]byte) error {
func (w *Writer) write(bufs ...[]byte) error {
for _, b := range bufs {
n, err := w.fbuf.Write(b)
w.pos += uint64(n)
@ -206,18 +192,18 @@ func (w *indexWriter) write(bufs ...[]byte) error {
}
// addPadding adds zero byte padding until the file size is a multiple of the given size.
func (w *indexWriter) addPadding() error {
p := w.pos % size_unit
func (w *Writer) addPadding(size int) error {
p := w.pos % uint64(size)
if p == 0 {
return nil
}
p = size_unit - p
p = uint64(size) - p
return errors.Wrap(w.write(make([]byte, p)), "add padding")
}
// ensureStage handles transitions between write stages and ensures that IndexWriter
// methods are called in an order valid for the implementation.
func (w *indexWriter) ensureStage(s indexWriterStage) error {
func (w *Writer) ensureStage(s indexWriterStage) error {
if w.stage == s {
return nil
}
@ -256,7 +242,7 @@ func (w *indexWriter) ensureStage(s indexWriterStage) error {
return nil
}
func (w *indexWriter) writeMeta() error {
func (w *Writer) writeMeta() error {
w.buf1.reset()
w.buf1.putBE32(MagicIndex)
w.buf1.putByte(indexFormatV1)
@ -264,7 +250,7 @@ func (w *indexWriter) writeMeta() error {
return w.write(w.buf1.get())
}
func (w *indexWriter) AddSeries(ref uint64, lset labels.Labels, chunks ...ChunkMeta) error {
func (w *Writer) AddSeries(ref uint64, lset labels.Labels, chunks ...chunks.Meta) error {
if err := w.ensureStage(idxStageSeries); err != nil {
return err
}
@ -328,7 +314,7 @@ func (w *indexWriter) AddSeries(ref uint64, lset labels.Labels, chunks ...ChunkM
return nil
}
func (w *indexWriter) AddSymbols(sym map[string]struct{}) error {
func (w *Writer) AddSymbols(sym map[string]struct{}) error {
if err := w.ensureStage(idxStageSymbols); err != nil {
return err
}
@ -361,7 +347,7 @@ func (w *indexWriter) AddSymbols(sym map[string]struct{}) error {
return errors.Wrap(err, "write symbols")
}
func (w *indexWriter) WriteLabelIndex(names []string, values []string) error {
func (w *Writer) WriteLabelIndex(names []string, values []string) error {
if len(values)%len(names) != 0 {
return errors.Errorf("invalid value list length %d for %d names", len(values), len(names))
}
@ -369,14 +355,14 @@ func (w *indexWriter) WriteLabelIndex(names []string, values []string) error {
return errors.Wrap(err, "ensure stage")
}
valt, err := newStringTuples(values, len(names))
valt, err := NewStringTuples(values, len(names))
if err != nil {
return err
}
sort.Sort(valt)
// Align beginning to 4 bytes for more efficient index list scans.
if err := w.addPadding(); err != nil {
if err := w.addPadding(4); err != nil {
return err
}
@ -407,7 +393,7 @@ func (w *indexWriter) WriteLabelIndex(names []string, values []string) error {
}
// writeOffsetTable writes a sequence of readable hash entries.
func (w *indexWriter) writeOffsetTable(entries []hashEntry) error {
func (w *Writer) writeOffsetTable(entries []hashEntry) error {
w.buf2.reset()
w.buf2.putBE32int(len(entries))
@ -428,7 +414,7 @@ func (w *indexWriter) writeOffsetTable(entries []hashEntry) error {
const indexTOCLen = 6*8 + 4
func (w *indexWriter) writeTOC() error {
func (w *Writer) writeTOC() error {
w.buf1.reset()
w.buf1.putBE64(w.toc.symbols)
@ -443,13 +429,13 @@ func (w *indexWriter) writeTOC() error {
return w.write(w.buf1.get())
}
func (w *indexWriter) WritePostings(name, value string, it Postings) error {
func (w *Writer) WritePostings(name, value string, it Postings) error {
if err := w.ensureStage(idxStagePostings); err != nil {
return errors.Wrap(err, "ensure stage")
}
// Align beginning to 4 bytes for more efficient postings list scans.
if err := w.addPadding(); err != nil {
if err := w.addPadding(4); err != nil {
return err
}
@ -506,7 +492,7 @@ type hashEntry struct {
offset uint64
}
func (w *indexWriter) Close() error {
func (w *Writer) Close() error {
if err := w.ensureStage(idxStageDone); err != nil {
return err
}
@ -519,37 +505,6 @@ func (w *indexWriter) Close() error {
return w.f.Close()
}
// IndexReader provides reading access of serialized index data.
type IndexReader interface {
// Symbols returns a set of string symbols that may occur in series' labels
// and indices.
Symbols() (map[string]struct{}, error)
// LabelValues returns the possible label values
LabelValues(names ...string) (StringTuples, error)
// Postings returns the postings list iterator for the label pair.
// The Postings here contain the offsets to the series inside the index.
// Found IDs are not strictly required to point to a valid Series, e.g. during
// background garbage collections.
Postings(name, value string) (Postings, error)
// SortedPostings returns a postings list that is reordered to be sorted
// by the label set of the underlying series.
SortedPostings(Postings) Postings
// Series populates the given labels and chunk metas for the series identified
// by the reference.
// Returns ErrNotFound if the ref does not resolve to a known series.
Series(ref uint64, lset *labels.Labels, chks *[]ChunkMeta) error
// LabelIndices returns the label pairs for which indices exist.
LabelIndices() ([][]string, error)
// Close releases the underlying resources of the reader.
Close() error
}
// StringTuples provides access to a sorted list of string tuples.
type StringTuples interface {
// Total number of tuples in the list.
@ -558,7 +513,7 @@ type StringTuples interface {
At(i int) ([]string, error)
}
type indexReader struct {
type Reader struct {
// The underlying byte slice holding the encoded series data.
b ByteSlice
toc indexTOC
@ -568,7 +523,7 @@ type indexReader struct {
// Cached hashmaps of section offsets.
labels map[string]uint32
postings map[string]uint32
postings map[labels.Label]uint32
// Cache of read symbols. Strings that are returned when reading from the
// block are always backed by true strings held in here rather than
// strings that are backed by byte slices from the mmap'd index file. This
@ -576,6 +531,8 @@ type indexReader struct {
// the block has been unmapped.
symbols map[uint32]string
dec *DecoderV1
crc32 hash.Hash32
}
@ -605,26 +562,28 @@ func (b realByteSlice) Sub(start, end int) ByteSlice {
return b[start:end]
}
// NewIndexReader returns a new IndexReader on the given byte slice.
func NewIndexReader(b ByteSlice) (IndexReader, error) {
return newIndexReader(b, nil)
// NewReader returns a new IndexReader on the given byte slice.
func NewReader(b ByteSlice) (*Reader, error) {
return newReader(b, nil)
}
// NewFileIndexReader returns a new index reader against the given index file.
func NewFileIndexReader(path string) (IndexReader, error) {
f, err := openMmapFile(path)
// NewFileReader returns a new index reader against the given index file.
func NewFileReader(path string) (*Reader, error) {
f, err := fileutil.OpenMmapFile(path)
if err != nil {
return nil, err
}
return newIndexReader(realByteSlice(f.b), f)
return newReader(realByteSlice(f.Bytes()), f)
}
func newIndexReader(b ByteSlice, c io.Closer) (*indexReader, error) {
r := &indexReader{
b: b,
c: c,
symbols: map[uint32]string{},
crc32: newCRC32(),
func newReader(b ByteSlice, c io.Closer) (*Reader, error) {
r := &Reader{
b: b,
c: c,
symbols: map[uint32]string{},
labels: map[string]uint32{},
postings: map[labels.Label]uint32{},
crc32: newCRC32(),
}
// Verify magic number.
if b.Len() < 4 {
@ -642,15 +601,56 @@ func newIndexReader(b ByteSlice, c io.Closer) (*indexReader, error) {
}
var err error
r.labels, err = r.readOffsetTable(r.toc.labelIndicesTable)
err = r.readOffsetTable(r.toc.labelIndicesTable, func(key []string, off uint32) error {
if len(key) != 1 {
return errors.Errorf("unexpected key length %d", len(key))
}
r.labels[key[0]] = off
return nil
})
if err != nil {
return nil, errors.Wrap(err, "read label index table")
}
r.postings, err = r.readOffsetTable(r.toc.postingsTable)
return r, errors.Wrap(err, "read postings table")
err = r.readOffsetTable(r.toc.postingsTable, func(key []string, off uint32) error {
if len(key) != 2 {
return errors.Errorf("unexpected key length %d", len(key))
}
r.postings[labels.Label{Name: key[0], Value: key[1]}] = off
return nil
})
if err != nil {
return nil, errors.Wrap(err, "read postings table")
}
r.dec = &DecoderV1{symbols: r.symbols}
return r, nil
}
func (r *indexReader) readTOC() error {
// Range marks a byte range.
type Range struct {
Start, End int64
}
// PostingsRanges returns a new map of byte range in the underlying index file
// for all postings lists.
func (r *Reader) PostingsRanges() (map[labels.Label]Range, error) {
m := map[labels.Label]Range{}
for l, start := range r.postings {
d := r.decbufAt(int(start))
if d.err() != nil {
return nil, d.err()
}
m[l] = Range{
Start: int64(start) + 4,
End: int64(start) + 4 + int64(d.len()),
}
}
return m, nil
}
func (r *Reader) readTOC() error {
if r.b.Len() < indexTOCLen {
return errInvalidSize
}
@ -676,7 +676,7 @@ func (r *indexReader) readTOC() error {
// decbufAt returns a new decoding buffer. It expects the first 4 bytes
// after offset to hold the big endian encoded content length, followed by the contents and the expected
// checksum.
func (r *indexReader) decbufAt(off int) decbuf {
func (r *Reader) decbufAt(off int) decbuf {
if r.b.Len() < off+4 {
return decbuf{e: errInvalidSize}
}
@ -700,7 +700,7 @@ func (r *indexReader) decbufAt(off int) decbuf {
// decbufUvarintAt returns a new decoding buffer. It expects the first bytes
// after offset to hold the uvarint-encoded buffers length, followed by the contents and the expected
// checksum.
func (r *indexReader) decbufUvarintAt(off int) decbuf {
func (r *Reader) decbufUvarintAt(off int) decbuf {
// We never have to access this method at the far end of the byte slice. Thus just checking
// against the MaxVarintLen32 is sufficient.
if r.b.Len() < off+binary.MaxVarintLen32 {
@ -730,7 +730,7 @@ func (r *indexReader) decbufUvarintAt(off int) decbuf {
// readSymbols reads the symbol table fully into memory and allocates proper strings for them.
// Strings backed by the mmap'd memory would cause memory faults if applications keep using them
// after the reader is closed.
func (r *indexReader) readSymbols(off int) error {
func (r *Reader) readSymbols(off int) error {
if off == 0 {
return nil
}
@ -752,16 +752,13 @@ func (r *indexReader) readSymbols(off int) error {
return d.err()
}
// readOffsetTable reads an offset table at the given position and returns a map
// with the key strings concatenated by the 0xff unicode non-character.
func (r *indexReader) readOffsetTable(off uint64) (map[string]uint32, error) {
const sep = "\xff"
// readOffsetTable reads an offset table at the given position and calls f for
// each found entry. If f returns an error, it stops decoding and returns the
// received error.
func (r *Reader) readOffsetTable(off uint64, f func([]string, uint32) error) error {
d := r.decbufAt(int(off))
cnt := d.be32()
res := make(map[string]uint32, cnt)
for d.err() == nil && d.len() > 0 && cnt > 0 {
keyCount := int(d.uvarint())
keys := make([]string, 0, keyCount)
@ -769,18 +766,24 @@ func (r *indexReader) readOffsetTable(off uint64) (map[string]uint32, error) {
for i := 0; i < keyCount; i++ {
keys = append(keys, d.uvarintStr())
}
res[strings.Join(keys, sep)] = uint32(d.uvarint())
o := uint32(d.uvarint())
if d.err() != nil {
break
}
if err := f(keys, o); err != nil {
return err
}
cnt--
}
return res, d.err()
return d.err()
}
func (r *indexReader) Close() error {
// Close the reader and its underlying resources.
func (r *Reader) Close() error {
return r.c.Close()
}
func (r *indexReader) lookupSymbol(o uint32) (string, error) {
func (r *Reader) lookupSymbol(o uint32) (string, error) {
s, ok := r.symbols[o]
if !ok {
return "", errors.Errorf("unknown symbol offset %d", o)
@ -788,7 +791,8 @@ func (r *indexReader) lookupSymbol(o uint32) (string, error) {
return s, nil
}
func (r *indexReader) Symbols() (map[string]struct{}, error) {
// Symbols returns a set of symbols that exist within the index.
func (r *Reader) Symbols() (map[string]struct{}, error) {
res := make(map[string]struct{}, len(r.symbols))
for _, s := range r.symbols {
@ -797,7 +801,13 @@ func (r *indexReader) Symbols() (map[string]struct{}, error) {
return res, nil
}
func (r *indexReader) LabelValues(names ...string) (StringTuples, error) {
// SymbolTable returns the symbol table that is used to resolve symbol references.
func (r *Reader) SymbolTable() map[uint32]string {
return r.symbols
}
// LabelValues returns value tuples that exist for the given label name tuples.
func (r *Reader) LabelValues(names ...string) (StringTuples, error) {
const sep = "\xff"
key := strings.Join(names, sep)
@ -830,7 +840,8 @@ type emptyStringTuples struct{}
func (emptyStringTuples) At(i int) ([]string, error) { return nil, nil }
func (emptyStringTuples) Len() int { return 0 }
func (r *indexReader) LabelIndices() ([][]string, error) {
// LabelIndices returns the label name tuples for which value indices exist.
func (r *Reader) LabelIndices() ([][]string, error) {
const sep = "\xff"
res := [][]string{}
@ -841,87 +852,38 @@ func (r *indexReader) LabelIndices() ([][]string, error) {
return res, nil
}
func (r *indexReader) Series(ref uint64, lbls *labels.Labels, chks *[]ChunkMeta) error {
d := r.decbufUvarintAt(int(ref))
*lbls = (*lbls)[:0]
*chks = (*chks)[:0]
k := int(d.uvarint())
for i := 0; i < k; i++ {
lno := uint32(d.uvarint())
lvo := uint32(d.uvarint())
if d.err() != nil {
return errors.Wrap(d.err(), "read series label offsets")
}
ln, err := r.lookupSymbol(lno)
if err != nil {
return errors.Wrap(err, "lookup label name")
}
lv, err := r.lookupSymbol(lvo)
if err != nil {
return errors.Wrap(err, "lookup label value")
}
*lbls = append(*lbls, labels.Label{Name: ln, Value: lv})
// Series reads the series with the given ID and writes its labels and chunks into lbls and chks.
func (r *Reader) Series(id uint64, lbls *labels.Labels, chks *[]chunks.Meta) error {
d := r.decbufUvarintAt(int(id))
if d.err() != nil {
return d.err()
}
// Read the chunks meta data.
k = int(d.uvarint())
if k == 0 {
return nil
}
t0 := d.varint64()
maxt := int64(d.uvarint64()) + t0
ref0 := int64(d.uvarint64())
*chks = append(*chks, ChunkMeta{
Ref: uint64(ref0),
MinTime: t0,
MaxTime: maxt,
})
t0 = maxt
for i := 1; i < k; i++ {
mint := int64(d.uvarint64()) + t0
maxt := int64(d.uvarint64()) + mint
ref0 += d.varint64()
t0 = maxt
if d.err() != nil {
return errors.Wrapf(d.err(), "read meta for chunk %d", i)
}
*chks = append(*chks, ChunkMeta{
Ref: uint64(ref0),
MinTime: mint,
MaxTime: maxt,
})
}
return d.err()
return r.dec.Series(d.get(), lbls, chks)
}
func (r *indexReader) Postings(name, value string) (Postings, error) {
const sep = "\xff"
key := strings.Join([]string{name, value}, sep)
off, ok := r.postings[key]
// Postings returns a postings list for the given label pair.
func (r *Reader) Postings(name, value string) (Postings, error) {
off, ok := r.postings[labels.Label{
Name: name,
Value: value,
}]
if !ok {
return emptyPostings, nil
return EmptyPostings(), nil
}
d := r.decbufAt(int(off))
d.be32() // consume unused postings list length.
return newBigEndianPostings(d.get()), errors.Wrap(d.err(), "get postings bytes")
if d.err() != nil {
return nil, errors.Wrap(d.err(), "get postings entry")
}
_, p, err := r.dec.Postings(d.get())
if err != nil {
return nil, errors.Wrap(err, "decode postings")
}
return p, nil
}
func (r *indexReader) SortedPostings(p Postings) Postings {
// SortedPostings returns the given postings list reordered so that the backing series
// are sorted. The index stores series sorted by their label sets, so postings read from
// it are already in that order and can be returned unchanged.
func (r *Reader) SortedPostings(p Postings) Postings {
return p
}
@ -930,7 +892,7 @@ type stringTuples struct {
s []string // flattened tuple entries
}
func newStringTuples(s []string, l int) (*stringTuples, error) {
func NewStringTuples(s []string, l int) (*stringTuples, error) {
if len(s)%l != 0 {
return nil, errors.Wrap(errInvalidSize, "string tuple list")
}
@ -992,3 +954,100 @@ func (t *serializedStringTuples) At(i int) ([]string, error) {
return res, nil
}
// DecoderV1 provides decoding methods for the v1 index file format.
//
// It currently does not contain decoding methods for all entry types but can be extended
// by them if there's demand.
type DecoderV1 struct {
symbols map[uint32]string
}
func (dec *DecoderV1) lookupSymbol(o uint32) (string, error) {
s, ok := dec.symbols[o]
if !ok {
return "", errors.Errorf("unknown symbol offset %d", o)
}
return s, nil
}
// SetSymbolTable sets the symbol table to be used for lookups when decoding series
// and label indices.
func (dec *DecoderV1) SetSymbolTable(t map[uint32]string) {
dec.symbols = t
}
// Postings returns a postings list for b and its number of elements.
func (dec *DecoderV1) Postings(b []byte) (int, Postings, error) {
d := decbuf{b: b}
n := d.be32int()
l := d.get()
return n, newBigEndianPostings(l), d.err()
}
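The entry layout this decoder expects is visible above: a big-endian 4-byte element count followed by the raw ID bytes handed to newBigEndianPostings. A minimal sketch of decoding a hand-built entry, assuming 4-byte big-endian IDs and that encoding/binary and fmt are imported:

buf := make([]byte, 4+3*4)
binary.BigEndian.PutUint32(buf[0:], 3) // element count
for i, id := range []uint32{4, 13, 42} {
	binary.BigEndian.PutUint32(buf[4+i*4:], id)
}
var dec DecoderV1
n, p, err := dec.Postings(buf)
if err != nil {
	// handle decoding error
}
_ = n // 3 elements
for p.Next() {
	fmt.Println(p.At()) // 4, 13, 42
}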
// Series decodes a series entry from the given byte slice into lbls and chks.
func (dec *DecoderV1) Series(b []byte, lbls *labels.Labels, chks *[]chunks.Meta) error {
*lbls = (*lbls)[:0]
*chks = (*chks)[:0]
d := decbuf{b: b}
k := int(d.uvarint())
for i := 0; i < k; i++ {
lno := uint32(d.uvarint())
lvo := uint32(d.uvarint())
if d.err() != nil {
return errors.Wrap(d.err(), "read series label offsets")
}
ln, err := dec.lookupSymbol(lno)
if err != nil {
return errors.Wrap(err, "lookup label name")
}
lv, err := dec.lookupSymbol(lvo)
if err != nil {
return errors.Wrap(err, "lookup label value")
}
*lbls = append(*lbls, labels.Label{Name: ln, Value: lv})
}
// Read the chunks meta data.
k = int(d.uvarint())
if k == 0 {
return nil
}
t0 := d.varint64()
maxt := int64(d.uvarint64()) + t0
ref0 := int64(d.uvarint64())
*chks = append(*chks, chunks.Meta{
Ref: uint64(ref0),
MinTime: t0,
MaxTime: maxt,
})
t0 = maxt
for i := 1; i < k; i++ {
mint := int64(d.uvarint64()) + t0
maxt := int64(d.uvarint64()) + mint
ref0 += d.varint64()
t0 = maxt
if d.err() != nil {
return errors.Wrapf(d.err(), "read meta for chunk %d", i)
}
*chks = append(*chks, chunks.Meta{
Ref: uint64(ref0),
MinTime: mint,
MaxTime: maxt,
})
}
return d.err()
}
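A short worked example of the delta decoding above, with illustrative values:

// The entry stores, in order: t0 = 1000 (varint), maxt-t0 = 50 (uvarint) and
// ref0 = 200 (uvarint), yielding chunk 1 = {Ref: 200, MinTime: 1000, MaxTime: 1050}.
// The next meta stores mint-prevMaxt = 10, maxt-mint = 40 and a ref delta of 8,
// yielding chunk 2 = {Ref: 208, MinTime: 1060, MaxTime: 1100}.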

View file

@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
package index
import (
"encoding/binary"
@ -23,35 +23,42 @@ import (
"github.com/prometheus/tsdb/labels"
)
// memPostings holds postings list for series ID per label pair. They may be written
var allPostingsKey = labels.Label{}
// AllPostingsKey returns the label key that is used to store the postings list of all existing IDs.
func AllPostingsKey() (name, value string) {
return allPostingsKey.Name, allPostingsKey.Value
}
// MemPostings holds postings lists for series IDs per label pair. They may be written
// to out of order.
// EnsureOrder() must be called once before any reads are done. This allows for quick
// unordered batch fills on startup.
type memPostings struct {
type MemPostings struct {
mtx sync.RWMutex
m map[labels.Label][]uint64
ordered bool
}
// newMemPoistings returns a memPostings that's ready for reads and writes.
func newMemPostings() *memPostings {
return &memPostings{
// NewMemPostings returns a MemPostings that's ready for reads and writes.
func NewMemPostings() *MemPostings {
return &MemPostings{
m: make(map[labels.Label][]uint64, 512),
ordered: true,
}
}
// newUnorderedMemPostings returns a memPostings that is not safe to be read from
// NewUnorderedMemPostings returns a MemPostings that is not safe to be read from
// until EnsureOrder has been called once.
func newUnorderedMemPostings() *memPostings {
return &memPostings{
func NewUnorderedMemPostings() *MemPostings {
return &MemPostings{
m: make(map[labels.Label][]uint64, 512),
ordered: false,
}
}
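A minimal sketch of the startup pattern described above — batch-fill out of order, sort once, then read — assuming labels.FromStrings from the labels package:

p := NewUnorderedMemPostings()
p.Add(9, labels.FromStrings("job", "api"))
p.Add(3, labels.FromStrings("job", "api"))
p.EnsureOrder() // required once before any reads
it := p.Get("job", "api")
for it.Next() {
	fmt.Println(it.At()) // 3, then 9
}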
// sortedKeys returns a list of sorted label keys of the postings.
func (p *memPostings) sortedKeys() []labels.Label {
// SortedKeys returns a list of sorted label keys of the postings.
func (p *MemPostings) SortedKeys() []labels.Label {
p.mtx.RLock()
keys := make([]labels.Label, 0, len(p.m))
@ -69,23 +76,26 @@ func (p *memPostings) sortedKeys() []labels.Label {
return keys
}
// Postings returns an iterator over the postings list for s.
func (p *memPostings) get(name, value string) Postings {
// Get returns a postings list for the given label pair.
func (p *MemPostings) Get(name, value string) Postings {
p.mtx.RLock()
l := p.m[labels.Label{Name: name, Value: value}]
p.mtx.RUnlock()
if l == nil {
return emptyPostings
return EmptyPostings()
}
return newListPostings(l)
}
var allPostingsKey = labels.Label{}
// All returns a postings list over all documents ever added.
func (p *MemPostings) All() Postings {
return p.Get(AllPostingsKey())
}
// ensurePostings ensures that all postings lists are sorted. After it returns all further
// EnsureOrder ensures that all postings lists are sorted. After it returns, all further
// calls to Add and addFor will insert new IDs in a sorted manner.
func (p *memPostings) ensureOrder() {
func (p *MemPostings) EnsureOrder() {
p.mtx.Lock()
defer p.mtx.Unlock()
@ -117,9 +127,66 @@ func (p *memPostings) ensureOrder() {
p.ordered = true
}
// add adds a document to the index. The caller has to ensure that no
// term argument appears twice.
func (p *memPostings) add(id uint64, lset labels.Labels) {
// Delete removes all IDs in the given map from the postings lists.
func (p *MemPostings) Delete(deleted map[uint64]struct{}) {
var keys []labels.Label
// Collect all keys relevant for deletion once. New keys added afterwards
// can by definition not be affected by any of the given deletes.
p.mtx.RLock()
for l := range p.m {
keys = append(keys, l)
}
p.mtx.RUnlock()
// For each key we first analyse whether the postings list is affected by the deletes.
// If yes, we actually reallocate a new postings list.
for _, l := range keys {
// Only lock for processing one postings list so we don't block reads for too long.
p.mtx.Lock()
found := false
for _, id := range p.m[l] {
if _, ok := deleted[id]; ok {
found = true
break
}
}
if !found {
p.mtx.Unlock()
continue
}
repl := make([]uint64, 0, len(p.m[l]))
for _, id := range p.m[l] {
if _, ok := deleted[id]; !ok {
repl = append(repl, id)
}
}
if len(repl) > 0 {
p.m[l] = repl
} else {
delete(p.m, l)
}
p.mtx.Unlock()
}
}
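A sketch of the deletion path, built from methods shown in this file (labels.FromStrings is assumed from the labels package):

p := NewMemPostings()
p.Add(3, labels.FromStrings("job", "api"))
p.Add(9, labels.FromStrings("job", "api"))
p.Delete(map[uint64]struct{}{3: {}})
ids, _ := ExpandPostings(p.Get("job", "api"))
// ids == []uint64{9}; the affected postings list was reallocated without ID 3.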
// Iter calls f for each postings list. It aborts if f returns an error and returns it.
func (p *MemPostings) Iter(f func(labels.Label, Postings) error) error {
p.mtx.RLock()
defer p.mtx.RUnlock()
for l, p := range p.m {
if err := f(l, newListPostings(p)); err != nil {
return err
}
}
return nil
}
// Add a label set to the postings index.
func (p *MemPostings) Add(id uint64, lset labels.Labels) {
p.mtx.Lock()
for _, l := range lset {
@ -130,7 +197,7 @@ func (p *memPostings) add(id uint64, lset labels.Labels) {
p.mtx.Unlock()
}
func (p *memPostings) addFor(id uint64, l labels.Label) {
func (p *MemPostings) addFor(id uint64, l labels.Label) {
list := append(p.m[l], id)
p.m[l] = list
@ -149,7 +216,8 @@ func (p *memPostings) addFor(id uint64, l labels.Label) {
}
}
func expandPostings(p Postings) (res []uint64, err error) {
// ExpandPostings returns the postings expanded as a slice.
func ExpandPostings(p Postings) (res []uint64, err error) {
for p.Next() {
res = append(res, p.At())
}
@ -189,6 +257,11 @@ func EmptyPostings() Postings {
return emptyPostings
}
// ErrPostings returns new postings that immediately error.
func ErrPostings(err error) Postings {
return errPostings{err}
}
// Intersect returns a new postings list over the intersection of the
// input postings.
func Intersect(its ...Postings) Postings {
@ -259,7 +332,7 @@ func (it *intersectPostings) Err() error {
// Merge returns a new iterator over the union of the input iterators.
func Merge(its ...Postings) Postings {
if len(its) == 0 {
return nil
return EmptyPostings()
}
if len(its) == 1 {
return its[0]
@ -340,12 +413,96 @@ func (it *mergedPostings) Err() error {
return it.b.Err()
}
// Without returns a new postings list that contains all elements from the full list that
// are not in the drop list.
func Without(full, drop Postings) Postings {
return newRemovedPostings(full, drop)
}
type removedPostings struct {
full, remove Postings
cur uint64
initialized bool
fok, rok bool
}
func newRemovedPostings(full, remove Postings) *removedPostings {
return &removedPostings{
full: full,
remove: remove,
}
}
func (rp *removedPostings) At() uint64 {
return rp.cur
}
func (rp *removedPostings) Next() bool {
if !rp.initialized {
rp.fok = rp.full.Next()
rp.rok = rp.remove.Next()
rp.initialized = true
}
if !rp.fok {
return false
}
if !rp.rok {
rp.cur = rp.full.At()
rp.fok = rp.full.Next()
return true
}
fcur, rcur := rp.full.At(), rp.remove.At()
if fcur < rcur {
rp.cur = fcur
rp.fok = rp.full.Next()
return true
} else if rcur < fcur {
// Forward the remove postings to the right position.
rp.rok = rp.remove.Seek(fcur)
} else {
// Skip the current posting.
rp.fok = rp.full.Next()
}
return rp.Next()
}
func (rp *removedPostings) Seek(id uint64) bool {
if rp.cur >= id {
return true
}
rp.fok = rp.full.Seek(id)
rp.rok = rp.remove.Seek(id)
rp.initialized = true
return rp.Next()
}
func (rp *removedPostings) Err() error {
if rp.full.Err() != nil {
return rp.full.Err()
}
return rp.remove.Err()
}
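removedPostings walks both sorted lists in step, emitting elements of full and seeking remove forward as needed. A usage sketch built from helpers in this file:

full := NewListPostings([]uint64{1, 2, 3, 4, 5})
drop := NewListPostings([]uint64{2, 4})
res, err := ExpandPostings(Without(full, drop))
// res == []uint64{1, 3, 5}, err == nil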
// listPostings implements the Postings interface over a plain list.
type listPostings struct {
list []uint64
cur uint64
}
func NewListPostings(list []uint64) Postings {
return newListPostings(list)
}
func newListPostings(list []uint64) *listPostings {
return &listPostings{list: list}
}
@ -434,27 +591,3 @@ func (it *bigEndianPostings) Seek(x uint64) bool {
func (it *bigEndianPostings) Err() error {
return nil
}
type stringset map[string]struct{}
func (ss stringset) set(s string) {
ss[s] = struct{}{}
}
func (ss stringset) has(s string) bool {
_, ok := ss[s]
return ok
}
func (ss stringset) String() string {
return strings.Join(ss.slice(), ",")
}
func (ss stringset) slice() []string {
slice := make([]string, 0, len(ss))
for k := range ss {
slice = append(slice, k)
}
sort.Strings(slice)
return slice
}

View file

@ -14,12 +14,15 @@
package labels
import (
"bufio"
"bytes"
"os"
"sort"
"strconv"
"strings"
"github.com/cespare/xxhash"
"github.com/pkg/errors"
)
const sep = '\xff'
@ -161,3 +164,49 @@ type Slice []Labels
func (s Slice) Len() int { return len(s) }
func (s Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s Slice) Less(i, j int) bool { return Compare(s[i], s[j]) < 0 }
// ReadLabels reads up to n label sets from the JSON-formatted file fn. It is mostly useful
// for loading test data.
func ReadLabels(fn string, n int) ([]Labels, error) {
f, err := os.Open(fn)
if err != nil {
return nil, err
}
defer f.Close()
scanner := bufio.NewScanner(f)
var mets []Labels
hashes := map[uint64]struct{}{}
i := 0
for scanner.Scan() && i < n {
m := make(Labels, 0, 10)
r := strings.NewReplacer("\"", "", "{", "", "}", "")
s := r.Replace(scanner.Text())
labelChunks := strings.Split(s, ",")
for _, labelChunk := range labelChunks {
split := strings.Split(labelChunk, ":")
m = append(m, Label{Name: split[0], Value: split[1]})
}
// Order of the k/v labels matters, don't assume we'll always receive them already sorted.
sort.Sort(m)
h := m.Hash()
if _, ok := hashes[h]; ok {
continue
}
mets = append(mets, m)
hashes[h] = struct{}{}
i++
}
// Check the scanner for read errors; the os.Open error was already handled above.
if err := scanner.Err(); err != nil {
return nil, err
}
if i != n {
return mets, errors.Errorf("requested %d metrics but found %d", n, i)
}
return mets, nil
}
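The parser is line-oriented rather than a strict JSON decoder: it strips quotes and braces and splits on ',' and ':', so flat label sets such as {"__name__":"up","job":"api"} parse fine, while names or values that themselves contain ':' or ',' would not. A usage sketch (the path is illustrative):

lsets, err := ReadLabels("testdata/20kseries.json", 100)
if err != nil {
	// read error, or fewer than 100 label sets found
}
_ = lsets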

View file

@ -76,6 +76,18 @@ func NewRegexpMatcher(name, pattern string) (Matcher, error) {
return &regexpMatcher{name: name, re: re}, nil
}
// NewMustRegexpMatcher returns a new matcher verifying that a value matches
// the regular expression pattern. It panics if the pattern is not a valid
// regular expression.
func NewMustRegexpMatcher(name, pattern string) Matcher {
re, err := regexp.Compile(pattern)
if err != nil {
panic(err)
}
return &regexpMatcher{name: name, re: re}
}
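A brief usage sketch; Matches is assumed to apply the unanchored regexp semantics of the plain regexpMatcher:

m := NewMustRegexpMatcher("job", "api|web")
fmt.Println(m.Matches("api")) // true
// An invalid pattern such as "(" would panic instead of returning an error.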
// notMatcher inverts the matching result for a matcher.
type notMatcher struct {
Matcher

View file

@ -19,7 +19,9 @@ import (
"strings"
"github.com/pkg/errors"
"github.com/prometheus/tsdb/chunkenc"
"github.com/prometheus/tsdb/chunks"
"github.com/prometheus/tsdb/index"
"github.com/prometheus/tsdb/labels"
)
@ -202,25 +204,17 @@ func (q *blockQuerier) Close() error {
// PostingsForMatchers assembles a single postings iterator against the index reader
// based on the given matchers.
func PostingsForMatchers(index IndexReader, ms ...labels.Matcher) (Postings, []string, error) {
var (
its []Postings
absent []string
)
func PostingsForMatchers(ix IndexReader, ms ...labels.Matcher) (index.Postings, error) {
var its []index.Postings
for _, m := range ms {
// If the matcher checks absence of a label, don't select them
// but propagate the check into the series set.
if _, ok := m.(*labels.EqualMatcher); ok && m.Matches("") {
absent = append(absent, m.Name())
continue
}
it, err := postingsForMatcher(index, m)
it, err := postingsForMatcher(ix, m)
if err != nil {
return nil, nil, err
return nil, err
}
its = append(its, it)
}
return index.SortedPostings(Intersect(its...)), absent, nil
return ix.SortedPostings(index.Intersect(its...)), nil
}
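A minimal caller's sketch, assuming labels.NewEqualMatcher from the labels package and an ir of type IndexReader:

p, err := PostingsForMatchers(ir, labels.NewEqualMatcher("job", "api"))
if err != nil {
	// handle error
}
for p.Next() {
	// p.At() is a series reference resolvable via ir.Series.
}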
// tuplesByPrefix uses binary search to find prefix matches within ts.
@ -254,17 +248,24 @@ func tuplesByPrefix(m *labels.PrefixMatcher, ts StringTuples) ([]string, error)
return matches, nil
}
func postingsForMatcher(index IndexReader, m labels.Matcher) (Postings, error) {
func postingsForMatcher(ix IndexReader, m labels.Matcher) (index.Postings, error) {
// If the matcher selects an empty value, it also selects all the series which don't
// have the label name set at all (e.g. job="" matches series without a job label).
// See: https://github.com/prometheus/prometheus/issues/3575
// and https://github.com/prometheus/prometheus/pull/3578#issuecomment-351653555
if m.Matches("") {
return postingsForUnsetLabelMatcher(ix, m)
}
// Fast-path for equal matching.
if em, ok := m.(*labels.EqualMatcher); ok {
it, err := index.Postings(em.Name(), em.Value())
it, err := ix.Postings(em.Name(), em.Value())
if err != nil {
return nil, err
}
return it, nil
}
tpls, err := index.LabelValues(m.Name())
tpls, err := ix.LabelValues(m.Name())
if err != nil {
return nil, err
}
@ -289,20 +290,55 @@ func postingsForMatcher(index IndexReader, m labels.Matcher) (Postings, error) {
}
if len(res) == 0 {
return EmptyPostings(), nil
return index.EmptyPostings(), nil
}
var rit []Postings
var rit []index.Postings
for _, v := range res {
it, err := index.Postings(m.Name(), v)
it, err := ix.Postings(m.Name(), v)
if err != nil {
return nil, err
}
rit = append(rit, it)
}
return Merge(rit...), nil
return index.Merge(rit...), nil
}
func postingsForUnsetLabelMatcher(ix IndexReader, m labels.Matcher) (index.Postings, error) {
tpls, err := ix.LabelValues(m.Name())
if err != nil {
return nil, err
}
var res []string
for i := 0; i < tpls.Len(); i++ {
vals, err := tpls.At(i)
if err != nil {
return nil, err
}
if !m.Matches(vals[0]) {
res = append(res, vals[0])
}
}
var rit []index.Postings
for _, v := range res {
it, err := ix.Postings(m.Name(), v)
if err != nil {
return nil, err
}
rit = append(rit, it)
}
allPostings, err := ix.Postings(index.AllPostingsKey())
if err != nil {
return nil, err
}
return index.Without(allPostings, index.Merge(rit...)), nil
}
func mergeStrings(a, b []string) []string {
@ -417,22 +453,23 @@ func (s *mergedSeriesSet) Next() bool {
return true
}
// ChunkSeriesSet exposes the chunks and intervals of a series instead of the
// actual series itself.
type ChunkSeriesSet interface {
Next() bool
At() (labels.Labels, []ChunkMeta, Intervals)
At() (labels.Labels, []chunks.Meta, Intervals)
Err() error
}
// baseChunkSeries loads the label set and chunk references for a postings
// list from an index.
type baseChunkSeries struct {
p Postings
p index.Postings
index IndexReader
tombstones TombstoneReader
absent []string // labels that must be unset in results.
lset labels.Labels
chks []ChunkMeta
chks []chunks.Meta
intervals Intervals
err error
}
@ -443,7 +480,7 @@ func LookupChunkSeries(ir IndexReader, tr TombstoneReader, ms ...labels.Matcher)
if tr == nil {
tr = EmptyTombstoneReader()
}
p, absent, err := PostingsForMatchers(ir, ms...)
p, err := PostingsForMatchers(ir, ms...)
if err != nil {
return nil, err
}
@ -451,11 +488,10 @@ func LookupChunkSeries(ir IndexReader, tr TombstoneReader, ms ...labels.Matcher)
p: p,
index: ir,
tombstones: tr,
absent: absent,
}, nil
}
func (s *baseChunkSeries) At() (labels.Labels, []ChunkMeta, Intervals) {
func (s *baseChunkSeries) At() (labels.Labels, []chunks.Meta, Intervals) {
return s.lset, s.chks, s.intervals
}
@ -463,14 +499,14 @@ func (s *baseChunkSeries) Err() error { return s.err }
func (s *baseChunkSeries) Next() bool {
var (
lset labels.Labels
chunks []ChunkMeta
err error
lset labels.Labels
chkMetas []chunks.Meta
err error
)
Outer:
for s.p.Next() {
ref := s.p.At()
if err := s.index.Series(ref, &lset, &chunks); err != nil {
if err := s.index.Series(ref, &lset, &chkMetas); err != nil {
// Postings may be stale. Skip if no underlying series exists.
if errors.Cause(err) == ErrNotFound {
continue
@ -479,15 +515,8 @@ Outer:
return false
}
// If a series contains a label that must be absent, it is skipped as well.
for _, abs := range s.absent {
if lset.Get(abs) != "" {
continue Outer
}
}
s.lset = lset
s.chks = chunks
s.chks = chkMetas
s.intervals, err = s.tombstones.Get(s.p.At())
if err != nil {
s.err = errors.Wrap(err, "get tombstones")
@ -496,7 +525,7 @@ Outer:
if len(s.intervals) > 0 {
// Only those chunks that are not entirely deleted.
chks := make([]ChunkMeta, 0, len(s.chks))
chks := make([]chunks.Meta, 0, len(s.chks))
for _, chk := range s.chks {
if !(Interval{chk.MinTime, chk.MaxTime}.isSubrange(s.intervals)) {
chks = append(chks, chk)
@ -523,14 +552,15 @@ type populatedChunkSeries struct {
mint, maxt int64
err error
chks []ChunkMeta
chks []chunks.Meta
lset labels.Labels
intervals Intervals
}
func (s *populatedChunkSeries) At() (labels.Labels, []ChunkMeta, Intervals) {
func (s *populatedChunkSeries) At() (labels.Labels, []chunks.Meta, Intervals) {
return s.lset, s.chks, s.intervals
}
func (s *populatedChunkSeries) Err() error { return s.err }
func (s *populatedChunkSeries) Next() bool {
@ -544,19 +574,31 @@ func (s *populatedChunkSeries) Next() bool {
chks = chks[1:]
}
for i := range chks {
c := &chks[i]
// This is to delete in place while iterating; see the standalone sketch after this hunk.
for i, rlen := 0, len(chks); i < rlen; i++ {
j := i - (rlen - len(chks))
c := &chks[j]
// Break out at the first chunk that has no overlap with mint, maxt.
if c.MinTime > s.maxt {
chks = chks[:i]
chks = chks[:j]
break
}
c.Chunk, s.err = s.chunks.Chunk(c.Ref)
if s.err != nil {
// This means that the chunk has been garbage collected. Remove it from the list.
if s.err == ErrNotFound {
s.err = nil
// Delete in-place.
chks = append(chks[:j], chks[j+1:]...)
}
return false
}
}
if len(chks) == 0 {
continue
}
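The index arithmetic above keeps j lagging i by the number of elements removed so far, so indexing stays valid while the slice shrinks under a fixed-length iteration. A self-contained sketch of the same pattern:

xs := []int{10, 11, 12, 13}
for i, rlen := 0, len(xs); i < rlen; i++ {
	j := i - (rlen - len(xs))
	if xs[j]%2 == 1 { // drop odd values in place
		xs = append(xs[:j], xs[j+1:]...)
	}
}
// xs == []int{10, 12}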
@ -608,7 +650,7 @@ func (s *blockSeriesSet) Err() error { return s.err }
// time series data.
type chunkSeries struct {
labels labels.Labels
chunks []ChunkMeta // in-order chunk refs
chunks []chunks.Meta // in-order chunk refs
mint, maxt int64
@ -711,17 +753,17 @@ func (it *chainedSeriesIterator) Err() error {
// chunkSeriesIterator implements a series iterator on top
// of a list of time-sorted, non-overlapping chunks.
type chunkSeriesIterator struct {
chunks []ChunkMeta
chunks []chunks.Meta
i int
cur chunks.Iterator
cur chunkenc.Iterator
maxt, mint int64
intervals Intervals
}
func newChunkSeriesIterator(cs []ChunkMeta, dranges Intervals, mint, maxt int64) *chunkSeriesIterator {
func newChunkSeriesIterator(cs []chunks.Meta, dranges Intervals, mint, maxt int64) *chunkSeriesIterator {
it := cs[0].Chunk.Iterator()
if len(dranges) > 0 {
@ -810,6 +852,46 @@ func (it *chunkSeriesIterator) Err() error {
return it.cur.Err()
}
// deletedIterator wraps an Iterator and makes sure samples that fall into
// deleted intervals are not returned.
type deletedIterator struct {
it chunkenc.Iterator
intervals Intervals
}
func (it *deletedIterator) At() (int64, float64) {
return it.it.At()
}
func (it *deletedIterator) Next() bool {
Outer:
for it.it.Next() {
ts, _ := it.it.At()
for _, tr := range it.intervals {
if tr.inBounds(ts) {
continue Outer
}
if ts > tr.Maxt {
it.intervals = it.intervals[1:]
continue
}
return true
}
return true
}
return false
}
func (it *deletedIterator) Err() error {
return it.it.Err()
}
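A behavior sketch: given an underlying iterator base yielding timestamps 1 through 5 (base is assumed to be any chunkenc.Iterator) and one deletion interval, the wrapper skips the covered samples:

it := &deletedIterator{it: base, intervals: Intervals{{2, 3}}}
for it.Next() {
	ts, v := it.At()
	fmt.Println(ts, v) // timestamps 1, 4, 5 only
}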
type mockSeriesSet struct {
next func() bool
series func() Series

View file

@ -109,7 +109,9 @@ type Stone struct {
func readTombstones(dir string) (memTombstones, error) {
b, err := ioutil.ReadFile(filepath.Join(dir, tombstoneFilename))
if err != nil {
if os.IsNotExist(err) {
return memTombstones{}, nil
} else if err != nil {
return nil, err
}

vendor/vendor.json (vendored, 36 changes)
View file

@ -794,28 +794,40 @@
"revisionTime": "2016-04-11T19:08:41Z"
},
{
"checksumSHA1": "XgGOJ06okm8qd+x/gdRj48RgXsg=",
"checksumSHA1": "C72p7MMwA94LrpqYD3dMcQEKHzY=",
"path": "github.com/prometheus/tsdb",
"revision": "30bbbe34f8787df072cf04563bc98fb8094ba070",
"revisionTime": "2017-11-30T09:58:01Z"
"revision": "07ef80820ef1250db82f9544f3fcf7f0f63ccee0",
"revisionTime": "2017-12-22T14:44:41Z"
},
{
"checksumSHA1": "C5V8KPHm/gZF0qrNwmIEDdG6rhA=",
"checksumSHA1": "XTirmk6Pq5TBGIZEaN5VL4k3i1s=",
"path": "github.com/prometheus/tsdb/chunkenc",
"revision": "07ef80820ef1250db82f9544f3fcf7f0f63ccee0",
"revisionTime": "2017-12-22T14:44:41Z"
},
{
"checksumSHA1": "+zsn1i8cqwgZXL8Bg6jDy32xjAo=",
"path": "github.com/prometheus/tsdb/chunks",
"revision": "30bbbe34f8787df072cf04563bc98fb8094ba070",
"revisionTime": "2017-11-30T09:58:01Z"
"revision": "07ef80820ef1250db82f9544f3fcf7f0f63ccee0",
"revisionTime": "2017-12-22T14:44:41Z"
},
{
"checksumSHA1": "7RhNAVcmDmLFqn9nWiudT0B76f8=",
"checksumSHA1": "h49AAcJ5+iRBwCgbfQf+2T1E1ZE=",
"path": "github.com/prometheus/tsdb/fileutil",
"revision": "30bbbe34f8787df072cf04563bc98fb8094ba070",
"revisionTime": "2017-11-30T09:58:01Z"
"revision": "07ef80820ef1250db82f9544f3fcf7f0f63ccee0",
"revisionTime": "2017-12-22T14:44:41Z"
},
{
"checksumSHA1": "zhmlvc322RH1L3l9DaA9d/HVVWs=",
"checksumSHA1": "y3lUn43gcc3HtYd20UDujyybGq4=",
"path": "github.com/prometheus/tsdb/index",
"revision": "07ef80820ef1250db82f9544f3fcf7f0f63ccee0",
"revisionTime": "2017-12-22T14:44:41Z"
},
{
"checksumSHA1": "Va8HWvOFTwFeewZFadMAOzNGDps=",
"path": "github.com/prometheus/tsdb/labels",
"revision": "30bbbe34f8787df072cf04563bc98fb8094ba070",
"revisionTime": "2017-11-30T09:58:01Z"
"revision": "07ef80820ef1250db82f9544f3fcf7f0f63ccee0",
"revisionTime": "2017-12-22T14:44:41Z"
},
{
"checksumSHA1": "5SYLEhADhdBVZAGPVHWggQl7H8k=",