prometheus/compact.go

package tsdb

import (
	"math/rand"
	"os"
	"path/filepath"
	"time"

	"github.com/coreos/etcd/pkg/fileutil"
	"github.com/fabxc/tsdb/labels"
	"github.com/oklog/ulid"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
)

// Compactor provides compaction against an underlying storage
// of time series data.
type Compactor interface {
	// Plan returns a set of non-overlapping directories that can
	// be compacted concurrently.
	// Results returned when compactions are in progress are undefined.
	Plan(dir string) ([][]string, error)

	// Write persists a Block into a directory.
	Write(dir string, b Block) error

	// Compact runs compaction against the provided directories. It must
	// only be called with directory sets returned by Plan(); distinct
	// sets may be compacted concurrently.
	Compact(dirs ...string) error
}
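
// exampleCompactionLoop is a minimal sketch, not part of the original file,
// showing how the two interface methods are meant to interact: Plan proposes
// non-overlapping directory sets, each of which can then be handed to Compact.
func exampleCompactionLoop(c Compactor, dir string) error {
	dirSets, err := c.Plan(dir)
	if err != nil {
		return err
	}
	for _, set := range dirSets {
		// Each set could be compacted in its own goroutine; they are
		// processed sequentially here for simplicity.
		if err := c.Compact(set...); err != nil {
			return err
		}
	}
	return nil
}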

// compactor implements the Compactor interface.
type compactor struct {
	metrics *compactorMetrics
	opts    *compactorOptions
}

type compactorMetrics struct {
	ran      prometheus.Counter
	failed   prometheus.Counter
	duration prometheus.Histogram
}

func newCompactorMetrics(r prometheus.Registerer) *compactorMetrics {
	m := &compactorMetrics{}

	m.ran = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "tsdb_compactions_total",
		Help: "Total number of compactions that were executed for the partition.",
	})
	m.failed = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "tsdb_compactions_failed_total",
		Help: "Total number of compactions that failed for the partition.",
	})
	m.duration = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "tsdb_compaction_duration",
		Help: "Duration of compaction runs.",
	})

	if r != nil {
		r.MustRegister(
			m.ran,
			m.failed,
			m.duration,
		)
	}
	return m
}

type compactorOptions struct {
	maxBlockRange uint64
}

func newCompactor(r prometheus.Registerer, opts *compactorOptions) *compactor {
	return &compactor{
		opts:    opts,
		metrics: newCompactorMetrics(r),
	}
}
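
// A minimal construction sketch, not part of the original file. The
// maxBlockRange value is an assumption for illustration: timestamps in this
// package are millisecond-based, so 8*3600*1000 caps a compacted block's
// time range at roughly eight hours.
func exampleNewCompactor(reg prometheus.Registerer) *compactor {
	return newCompactor(reg, &compactorOptions{
		maxBlockRange: 8 * 3600 * 1000, // assumed: 8h expressed in ms
	})
}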

type compactionInfo struct {
	seq        int
	generation int
	mint, maxt int64
}

const compactionBlocksLen = 3

func (c *compactor) Plan(dir string) ([][]string, error) {
	dirs, err := blockDirs(dir)
	if err != nil {
		return nil, err
	}

	var bs []*BlockMeta

	for _, dir := range dirs {
		meta, err := readMetaFile(dir)
		if err != nil {
			return nil, err
		}
		if meta.Compaction.Generation > 0 {
			bs = append(bs, meta)
		}
	}

	if len(bs) == 0 {
		return nil, nil
	}

	sliceDirs := func(i, j int) [][]string {
		var res []string
		for k := i; k < j; k++ {
			res = append(res, dirs[k])
		}
		return [][]string{res}
	}

	// We care about compacting multiple blocks, starting with the oldest.
	for i := 0; i < len(bs)-compactionBlocksLen+1; i++ {
		if c.match(bs[i : i+compactionBlocksLen]) {
			return sliceDirs(i, i+compactionBlocksLen), nil
		}
	}
	return nil, nil
}

func (c *compactor) match(bs []*BlockMeta) bool {
	g := bs[0].Compaction.Generation

	for _, b := range bs {
		if b.Compaction.Generation != g {
			return false
		}
	}
	return uint64(bs[len(bs)-1].MaxTime-bs[0].MinTime) <= c.opts.maxBlockRange
}
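
// exampleMatch is an illustrative sketch, not part of the original file. It
// shows the two conditions match checks: all candidate blocks share a
// compaction generation, and their combined time range stays within
// maxBlockRange. The millisecond values below are assumptions for the example.
func exampleMatch() bool {
	c := &compactor{opts: &compactorOptions{maxBlockRange: 2 * 3600 * 1000}}

	bs := []*BlockMeta{
		{MinTime: 0, MaxTime: 3600 * 1000},
		{MinTime: 3600 * 1000, MaxTime: 2 * 3600 * 1000},
	}
	for _, b := range bs {
		b.Compaction.Generation = 1
	}
	// true: equal generations and a combined range of exactly maxBlockRange.
	return c.match(bs)
}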

var entropy = rand.New(rand.NewSource(time.Now().UnixNano()))

func mergeBlockMetas(blocks ...Block) (res BlockMeta) {
	m0 := blocks[0].Meta()

	res.Sequence = m0.Sequence
	res.MinTime = m0.MinTime
	res.MaxTime = blocks[len(blocks)-1].Meta().MaxTime
	res.ULID = ulid.MustNew(ulid.Now(), entropy)

	res.Compaction.Generation = m0.Compaction.Generation + 1

	for _, b := range blocks {
		res.Stats.NumSamples += b.Meta().Stats.NumSamples
	}
	return res
}

func (c *compactor) Compact(dirs ...string) (err error) {
	var blocks []Block

	for _, d := range dirs {
		b, err := newPersistedBlock(d)
		if err != nil {
			return err
		}
		defer b.Close()

		blocks = append(blocks, b)
	}

	return c.write(dirs[0], blocks...)
}

func (c *compactor) Write(dir string, b Block) error {
	return c.write(dir, b)
}

// write creates a new block in dir that is the union of the provided blocks.
// It cleans up all files of the old blocks after completing successfully.
func (c *compactor) write(dir string, blocks ...Block) (err error) {
	defer func(t time.Time) {
		if err != nil {
			c.metrics.failed.Inc()
		}
		c.metrics.duration.Observe(time.Since(t).Seconds())
	}(time.Now())

	tmp := dir + ".tmp"

	if err = os.RemoveAll(tmp); err != nil {
		return err
	}
	if err = os.MkdirAll(tmp, 0777); err != nil {
		return err
	}

	// Populate chunk and index files into temporary directory with
	// data of all blocks.
	chunkw, err := newChunkWriter(chunkDir(tmp))
	if err != nil {
		return errors.Wrap(err, "open chunk writer")
	}
	indexw, err := newIndexWriter(tmp)
	if err != nil {
		return errors.Wrap(err, "open index writer")
	}

	meta, err := c.populate(blocks, indexw, chunkw)
	if err != nil {
		return errors.Wrap(err, "write compaction")
	}
	if err = writeMetaFile(tmp, meta); err != nil {
		return errors.Wrap(err, "write merged meta")
	}

	if err = chunkw.Close(); err != nil {
		return errors.Wrap(err, "close chunk writer")
	}
	if err = indexw.Close(); err != nil {
		return errors.Wrap(err, "close index writer")
	}

	// Block successfully written, make visible and remove old ones.
	if err := renameFile(tmp, dir); err != nil {
		return errors.Wrap(err, "rename block dir")
	}
	for _, b := range blocks[1:] {
		if err := os.RemoveAll(b.Dir()); err != nil {
			return err
		}
	}
	// Properly sync the new block directory to ensure its contents are visible.
	df, err := fileutil.OpenDir(dir)
	if err != nil {
		return errors.Wrap(err, "open block dir")
	}
	defer df.Close()

	if err := fileutil.Fsync(df); err != nil {
		return errors.Wrap(err, "sync block dir")
	}
	return nil
}
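
// atomicDirReplaceSketch isolates, as a minimal sketch not present in the
// original file, the crash-safety pattern write relies on: build everything
// in a ".tmp" sibling directory, then rename it over the target in one step.
// A crash before the rename leaves only the .tmp directory, which the next
// run removes up front; renameFile additionally fsyncs the parent directory
// so the rename itself is persisted.
func atomicDirReplaceSketch(dir string, populate func(tmp string) error) error {
	tmp := dir + ".tmp"

	if err := os.RemoveAll(tmp); err != nil {
		return err
	}
	if err := os.MkdirAll(tmp, 0777); err != nil {
		return err
	}
	if err := populate(tmp); err != nil {
		return err
	}
	return renameFile(tmp, dir)
}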

// populate fills the index and chunk writers with new data gathered as the union
// of the provided blocks. It returns meta information for the new block.
func (c *compactor) populate(blocks []Block, indexw IndexWriter, chunkw ChunkWriter) (*BlockMeta, error) {
	var set compactionSet

	for i, b := range blocks {
		all, err := b.Index().Postings("", "")
		if err != nil {
			return nil, err
		}
		// TODO(fabxc): find more transparent way of handling this.
		if hb, ok := b.(*headBlock); ok {
			all = hb.remapPostings(all)
		}
		s := newCompactionSeriesSet(b.Index(), b.Chunks(), all)

		if i == 0 {
			set = s
			continue
		}
		set, err = newCompactionMerger(set, s)
		if err != nil {
			return nil, err
		}
	}

	// We fully rebuild the postings list index from merged series.
	var (
		postings = &memPostings{m: make(map[term][]uint32, 512)}
		values   = map[string]stringset{}
		i        = uint32(0)
		meta     = mergeBlockMetas(blocks...)
	)

	for set.Next() {
		lset, chunks := set.At()

		if err := chunkw.WriteChunks(chunks...); err != nil {
			return nil, err
		}

		indexw.AddSeries(i, lset, chunks...)

		meta.Stats.NumChunks += uint64(len(chunks))
		meta.Stats.NumSeries++

		for _, l := range lset {
			valset, ok := values[l.Name]
			if !ok {
				valset = stringset{}
				values[l.Name] = valset
			}
			valset.set(l.Value)

			postings.add(i, term{name: l.Name, value: l.Value})
		}
		i++
	}
	if set.Err() != nil {
		return nil, set.Err()
	}

	s := make([]string, 0, 256)

	for n, v := range values {
		s = s[:0]

		for x := range v {
			s = append(s, x)
		}
		if err := indexw.WriteLabelIndex([]string{n}, s); err != nil {
			return nil, err
		}
	}

	for t := range postings.m {
		if err := indexw.WritePostings(t.name, t.value, postings.get(t)); err != nil {
			return nil, err
		}
	}
	// Write a postings list containing all series.
	all := make([]uint32, i)
	for i := range all {
		all[i] = uint32(i)
	}
	if err := indexw.WritePostings("", "", newListPostings(all)); err != nil {
		return nil, err
	}

	return &meta, nil
}

type compactionSet interface {
	Next() bool
	At() (labels.Labels, []ChunkMeta)
	Err() error
}

type compactionSeriesSet struct {
	p      Postings
	index  IndexReader
	chunks ChunkReader

	l   labels.Labels
	c   []ChunkMeta
	err error
}

func newCompactionSeriesSet(i IndexReader, c ChunkReader, p Postings) *compactionSeriesSet {
	return &compactionSeriesSet{
		index:  i,
		chunks: c,
		p:      p,
	}
}

func (c *compactionSeriesSet) Next() bool {
	if !c.p.Next() {
		return false
	}

	c.l, c.c, c.err = c.index.Series(c.p.At())
	if c.err != nil {
		return false
	}
	for i := range c.c {
		chk := &c.c[i]

		chk.Chunk, c.err = c.chunks.Chunk(chk.Ref)
		if c.err != nil {
			return false
		}
	}
	return true
}

func (c *compactionSeriesSet) Err() error {
	if c.err != nil {
		return c.err
	}
	return c.p.Err()
}

func (c *compactionSeriesSet) At() (labels.Labels, []ChunkMeta) {
	return c.l, c.c
}

type compactionMerger struct {
	a, b compactionSet

	aok, bok bool
	l        labels.Labels
	c        []ChunkMeta
}

type compactionSeries struct {
	labels labels.Labels
	chunks []ChunkMeta
}

func newCompactionMerger(a, b compactionSet) (*compactionMerger, error) {
	c := &compactionMerger{
		a: a,
		b: b,
	}
	// Initialize first elements of both sets as Next() needs
	// one element look-ahead.
	c.aok = c.a.Next()
	c.bok = c.b.Next()

	return c, c.Err()
}

func (c *compactionMerger) compare() int {
	if !c.aok {
		return 1
	}
	if !c.bok {
		return -1
	}
	a, _ := c.a.At()
	b, _ := c.b.At()
	return labels.Compare(a, b)
}

func (c *compactionMerger) Next() bool {
	if !c.aok && !c.bok || c.Err() != nil {
		return false
	}

	d := c.compare()
	if d > 0 {
		c.l, c.c = c.b.At()
		c.bok = c.b.Next()
	} else if d < 0 {
		c.l, c.c = c.a.At()
		c.aok = c.a.Next()
	} else {
		// Both sets contain the current series. Chain them into a single one.
		l, ca := c.a.At()
		_, cb := c.b.At()

		c.l = l
		c.c = append(ca, cb...)

		c.aok = c.a.Next()
		c.bok = c.b.Next()
	}
	return true
}

func (c *compactionMerger) Err() error {
	if c.a.Err() != nil {
		return c.a.Err()
	}
	return c.b.Err()
}

func (c *compactionMerger) At() (labels.Labels, []ChunkMeta) {
	return c.l, c.c
}
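
// drainCompactionSet is an illustrative sketch, not part of the original
// file, of the iteration contract shared by compactionSeriesSet and
// compactionMerger: Next advances the cursor, At returns the current series,
// and Err must be checked once Next returns false.
func drainCompactionSet(set compactionSet) (n int, err error) {
	for set.Next() {
		lset, chunks := set.At()
		_ = lset   // labels of the current series
		_ = chunks // its chunk metadata, populated with chunk data
		n++
	}
	return n, set.Err()
}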

func renameFile(from, to string) error {
	if err := os.RemoveAll(to); err != nil {
		return err
	}
	if err := os.Rename(from, to); err != nil {
		return err
	}

	// Directory was renamed; sync parent dir to persist rename.
	pdir, err := fileutil.OpenDir(filepath.Dir(to))
	if err != nil {
		return err
	}
	if err = fileutil.Fsync(pdir); err != nil {
		return err
	}
	if err = pdir.Close(); err != nil {
		return err
	}
	return nil
}