// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tsdb

import (
	"fmt"
	"io"
	"math/rand"
	"os"
	"path/filepath"
	"sort"
	"time"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
	"github.com/oklog/ulid"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/tsdb/chunkenc"
	"github.com/prometheus/tsdb/chunks"
	"github.com/prometheus/tsdb/fileutil"
	"github.com/prometheus/tsdb/index"
	"github.com/prometheus/tsdb/labels"
)

// ExponentialBlockRanges returns steps time ranges, starting at minSize and
// multiplying the range by stepSize at each step.
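//
// For example, ExponentialBlockRanges(1000, 3, 4) returns [1000, 4000, 16000];
// with a 2h base range in milliseconds and stepSize 3 (illustrative values),
// the ranges come out as 2h, 6h, 18h, and so on.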
func ExponentialBlockRanges(minSize int64, steps, stepSize int) []int64 {
	ranges := make([]int64, 0, steps)
	curRange := minSize
	for i := 0; i < steps; i++ {
		ranges = append(ranges, curRange)
		curRange = curRange * int64(stepSize)
	}

	return ranges
}

// Compactor provides compaction against an underlying storage
// of time series data.
type Compactor interface {
	// Plan returns a set of non-overlapping directories that can
	// be compacted concurrently.
	// Results returned when compactions are in progress are undefined.
	Plan(dir string) ([]string, error)

	// Write persists a Block into a directory.
	Write(dest string, b BlockReader, mint, maxt int64, parent *BlockMeta) (ulid.ULID, error)

	// Compact runs compaction against the provided directories. Must
	// only be called concurrently with results of Plan().
	Compact(dest string, dirs ...string) (ulid.ULID, error)
}

// LeveledCompactor implements the Compactor interface.
type LeveledCompactor struct {
	dir       string
	metrics   *compactorMetrics
	logger    log.Logger
	ranges    []int64
	chunkPool chunkenc.Pool
}

type compactorMetrics struct {
	ran          prometheus.Counter
	failed       prometheus.Counter
	duration     prometheus.Histogram
	chunkSize    prometheus.Histogram
	chunkSamples prometheus.Histogram
	chunkRange   prometheus.Histogram
}

func newCompactorMetrics(r prometheus.Registerer) *compactorMetrics {
	m := &compactorMetrics{}

	m.ran = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_compactions_total",
		Help: "Total number of compactions that were executed for the partition.",
	})
	m.failed = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_compactions_failed_total",
		Help: "Total number of compactions that failed for the partition.",
	})
	m.duration = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "prometheus_tsdb_compaction_duration_seconds",
		Help:    "Duration of compaction runs.",
		Buckets: prometheus.ExponentialBuckets(1, 2, 10),
	})
	m.chunkSize = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "prometheus_tsdb_compaction_chunk_size",
		Help:    "Final size of chunks on their first compaction.",
		Buckets: prometheus.ExponentialBuckets(32, 1.5, 12),
	})
	m.chunkSamples = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "prometheus_tsdb_compaction_chunk_samples",
		Help:    "Final number of samples on their first compaction.",
		Buckets: prometheus.ExponentialBuckets(4, 1.5, 12),
	})
	m.chunkRange = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "prometheus_tsdb_compaction_chunk_range",
		Help:    "Final time range of chunks on their first compaction.",
		Buckets: prometheus.ExponentialBuckets(100, 4, 10),
	})

	if r != nil {
		r.MustRegister(
			m.ran,
			m.failed,
			m.duration,
			m.chunkRange,
			m.chunkSamples,
			m.chunkSize,
		)
	}
	return m
}

// NewLeveledCompactor returns a LeveledCompactor.
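//
// A minimal construction, for illustration (a nil Registerer skips metric
// registration and a nil pool falls back to chunkenc.NewPool()):
//
//	c, err := NewLeveledCompactor(nil, log.NewNopLogger(),
//		ExponentialBlockRanges(2*60*60*1000, 5, 3), nil)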
func NewLeveledCompactor(r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool) (*LeveledCompactor, error) {
	if len(ranges) == 0 {
		return nil, errors.New("at least one range must be provided")
	}
	if pool == nil {
		pool = chunkenc.NewPool()
	}
	return &LeveledCompactor{
		ranges:    ranges,
		chunkPool: pool,
		logger:    l,
		metrics:   newCompactorMetrics(r),
	}, nil
}

type dirMeta struct {
	dir  string
	meta *BlockMeta
}

// Plan returns a list of compactable blocks in the provided directory.
func (c *LeveledCompactor) Plan(dir string) ([]string, error) {
	dirs, err := blockDirs(dir)
	if err != nil {
		return nil, err
	}
	if len(dirs) < 1 {
		return nil, nil
	}

	var dms []dirMeta
	for _, dir := range dirs {
		meta, err := readMetaFile(dir)
		if err != nil {
			return nil, err
		}
		dms = append(dms, dirMeta{dir, meta})
	}
	return c.plan(dms)
}

func (c *LeveledCompactor) plan(dms []dirMeta) ([]string, error) {
	sort.Slice(dms, func(i, j int) bool {
		return dms[i].meta.MinTime < dms[j].meta.MinTime
	})

	// Do not include the most recently created block, i.e. the one with
	// max(minTime), which was just persisted from the WAL. This gives users a
	// window of a full block size to piecewise back up new data without having
	// to care about data overlap.
	dms = dms[:len(dms)-1]

	var res []string
	for _, dm := range c.selectDirs(dms) {
		res = append(res, dm.dir)
	}
	if len(res) > 0 {
		return res, nil
	}

	// Compact any blocks that have >5% tombstones.
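	// For illustration: a block with 10000 series crosses this threshold once
	// it carries more than ~500 tombstones, since 501/10001 > 0.05.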
	for i := len(dms) - 1; i >= 0; i-- {
		meta := dms[i].meta
		if meta.MaxTime-meta.MinTime < c.ranges[len(c.ranges)/2] {
			break
		}

		if float64(meta.Stats.NumTombstones)/float64(meta.Stats.NumSeries+1) > 0.05 {
			return []string{dms[i].dir}, nil
		}
	}

	return nil, nil
}

// selectDirs returns the dir metas that should be compacted into a single new block.
// If only a single block range is configured, the result is always nil.
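//
// For illustration, with ranges {20, 60} and blocks [0, 20), [20, 40), [40, 60):
// at interval 60 the three blocks align into a single 60-wide group spanning the
// full range, so all three are selected for compaction into one block.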
func (c *LeveledCompactor) selectDirs(ds []dirMeta) []dirMeta {
	if len(c.ranges) < 2 || len(ds) < 1 {
		return nil
	}

	highTime := ds[len(ds)-1].meta.MinTime

	for _, iv := range c.ranges[1:] {
		parts := splitByRange(ds, iv)
		if len(parts) == 0 {
			continue
		}

	Outer:
		for _, p := range parts {
			// Do not select the range if it has a block whose compaction failed.
			for _, dm := range p {
				if dm.meta.Compaction.Failed {
					continue Outer
				}
			}

			mint := p[0].meta.MinTime
			maxt := p[len(p)-1].meta.MaxTime
			// Pick the range of blocks if it spans the full range (potentially with gaps)
			// or is before the most recent block.
			// This ensures we don't compact blocks prematurely when another one of the same
			// size still fits in the range.
			if (maxt-mint == iv || maxt <= highTime) && len(p) > 1 {
				return p
			}
		}
	}

	return nil
}

// splitByRange splits the directories by the time range. The range sequence starts at 0.
//
// For example, if we have blocks [0-10, 10-20, 50-60, 90-100] and the split range tr is 30
// it returns [0-10, 10-20], [50-60], [90-100].
func splitByRange(ds []dirMeta, tr int64) [][]dirMeta {
	var splitDirs [][]dirMeta

	for i := 0; i < len(ds); {
		var (
			group []dirMeta
			t0    int64
			m     = ds[i].meta
		)
		// Compute start of aligned time range of size tr closest to the current block's start.
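		// For illustration, with tr=30: MinTime 45 aligns to t0=30, and MinTime -10
		// aligns to t0=-30 (Go's integer division truncates toward zero).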
		if m.MinTime >= 0 {
			t0 = tr * (m.MinTime / tr)
		} else {
			t0 = tr * ((m.MinTime - tr + 1) / tr)
		}
		// Skip blocks that don't fall into the range. This can happen through
		// misalignment or when a block already spans a multiple of the intended range.
		if ds[i].meta.MinTime < t0 || ds[i].meta.MaxTime > t0+tr {
			i++
			continue
		}

		// Add all dirs to the current group that are within [t0, t0+tr].
		for ; i < len(ds); i++ {
			// Either the block falls into the next range or doesn't fit at all (checked above).
			if ds[i].meta.MinTime < t0 || ds[i].meta.MaxTime > t0+tr {
				break
			}
			group = append(group, ds[i])
		}

		if len(group) > 0 {
			splitDirs = append(splitDirs, group)
		}
	}

	return splitDirs
}
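// compactBlockMetas merges the metas of the provided blocks into the meta for
// the new compacted block: it unions their compaction sources, records the
// inputs as parents, and sets the level to one above the highest input level.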
func compactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta {
	res := &BlockMeta{
		ULID:    uid,
		MinTime: blocks[0].MinTime,
		MaxTime: blocks[len(blocks)-1].MaxTime,
	}

	sources := map[ulid.ULID]struct{}{}

	for _, b := range blocks {
		if b.Compaction.Level > res.Compaction.Level {
			res.Compaction.Level = b.Compaction.Level
		}
		for _, s := range b.Compaction.Sources {
			sources[s] = struct{}{}
		}
		res.Compaction.Parents = append(res.Compaction.Parents, BlockDesc{
			ULID:    b.ULID,
			MinTime: b.MinTime,
			MaxTime: b.MaxTime,
		})
	}
	res.Compaction.Level++

	for s := range sources {
		res.Compaction.Sources = append(res.Compaction.Sources, s)
	}
	sort.Slice(res.Compaction.Sources, func(i, j int) bool {
		return res.Compaction.Sources[i].Compare(res.Compaction.Sources[j]) < 0
	})

	return res
}

// Compact creates a new block in the compactor's directory from the blocks in the
// provided directories.
func (c *LeveledCompactor) Compact(dest string, dirs ...string) (uid ulid.ULID, err error) {
	var (
		blocks []BlockReader
		bs     []*Block
		metas  []*BlockMeta
		uids   []string
	)

	for _, d := range dirs {
		b, err := OpenBlock(d, c.chunkPool)
		if err != nil {
			return uid, err
		}
		defer b.Close()

		meta, err := readMetaFile(d)
		if err != nil {
			return uid, err
		}

		metas = append(metas, meta)
		blocks = append(blocks, b)
		bs = append(bs, b)
		uids = append(uids, meta.ULID.String())
	}

	entropy := rand.New(rand.NewSource(time.Now().UnixNano()))
	uid = ulid.MustNew(ulid.Now(), entropy)

	meta := compactBlockMetas(uid, metas...)
	err = c.write(dest, meta, blocks...)
	if err == nil {
		level.Info(c.logger).Log(
			"msg", "compact blocks",
			"count", len(blocks),
			"mint", meta.MinTime,
			"maxt", meta.MaxTime,
			"ulid", meta.ULID,
			"sources", fmt.Sprintf("%v", uids),
		)
		return uid, nil
	}

	var merr MultiError
	merr.Add(err)

	for _, b := range bs {
		if err := b.setCompactionFailed(); err != nil {
			merr.Add(errors.Wrapf(err, "setting compaction failed for block: %s", b.Dir()))
		}
	}

	return uid, merr
}
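// Write persists the provided BlockReader as a new block with the given mint
// and maxt in dest. If parent is non-nil, it is recorded as the block's parent.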
func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, parent *BlockMeta) (ulid.ULID, error) {
	entropy := rand.New(rand.NewSource(time.Now().UnixNano()))
	uid := ulid.MustNew(ulid.Now(), entropy)

	meta := &BlockMeta{
		ULID:    uid,
		MinTime: mint,
		MaxTime: maxt,
	}
	meta.Compaction.Level = 1
	meta.Compaction.Sources = []ulid.ULID{uid}

	if parent != nil {
		meta.Compaction.Parents = []BlockDesc{
			{ULID: parent.ULID, MinTime: parent.MinTime, MaxTime: parent.MaxTime},
		}
	}

	err := c.write(dest, meta, b)
	if err != nil {
		return uid, err
	}

	level.Info(c.logger).Log("msg", "write block", "mint", meta.MinTime, "maxt", meta.MaxTime, "ulid", meta.ULID)
	return uid, nil
}

// instrumentedChunkWriter is used for level 1 compactions to record statistics
// about compacted chunks.
type instrumentedChunkWriter struct {
	ChunkWriter

	size    prometheus.Histogram
	samples prometheus.Histogram
	trange  prometheus.Histogram
}

func (w *instrumentedChunkWriter) WriteChunks(chunks ...chunks.Meta) error {
	for _, c := range chunks {
		w.size.Observe(float64(len(c.Chunk.Bytes())))
		w.samples.Observe(float64(c.Chunk.NumSamples()))
		w.trange.Observe(float64(c.MaxTime - c.MinTime))
	}
	return w.ChunkWriter.WriteChunks(chunks...)
}

// write creates a new block that is the union of the provided blocks into dir.
// It cleans up all files of the old blocks after completing successfully.
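// The block is assembled in a ".tmp" directory first, fsynced, and only then
// renamed into place, so a crash mid-write never leaves a half-written block
// visible to readers.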
func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockReader) (err error) {
	dir := filepath.Join(dest, meta.ULID.String())
	tmp := dir + ".tmp"

	defer func(t time.Time) {
		if err != nil {
			c.metrics.failed.Inc()
			// TODO(gouthamve): Handle error how?
			if err := os.RemoveAll(tmp); err != nil {
				level.Error(c.logger).Log("msg", "failed to remove tmp folder after failed compaction", "err", err.Error())
			}
		}
		c.metrics.ran.Inc()
		c.metrics.duration.Observe(time.Since(t).Seconds())
	}(time.Now())

	if err = os.RemoveAll(tmp); err != nil {
		return err
	}

	if err = os.MkdirAll(tmp, 0777); err != nil {
		return err
	}

	// Populate chunk and index files into temporary directory with
	// data of all blocks.
	var chunkw ChunkWriter

	chunkw, err = chunks.NewWriter(chunkDir(tmp))
	if err != nil {
		return errors.Wrap(err, "open chunk writer")
	}
	// Record written chunk sizes on level 1 compactions.
	if meta.Compaction.Level == 1 {
		chunkw = &instrumentedChunkWriter{
			ChunkWriter: chunkw,
			size:        c.metrics.chunkSize,
			samples:     c.metrics.chunkSamples,
			trange:      c.metrics.chunkRange,
		}
	}

	indexw, err := index.NewWriter(filepath.Join(tmp, indexFilename))
	if err != nil {
		return errors.Wrap(err, "open index writer")
	}

	if err := c.populateBlock(blocks, meta, indexw, chunkw); err != nil {
		return errors.Wrap(err, "write compaction")
	}

	if err = writeMetaFile(tmp, meta); err != nil {
		return errors.Wrap(err, "write merged meta")
	}

	if err = chunkw.Close(); err != nil {
		return errors.Wrap(err, "close chunk writer")
	}
	if err = indexw.Close(); err != nil {
		return errors.Wrap(err, "close index writer")
	}

	// Create an empty tombstones file.
	if err := writeTombstoneFile(tmp, NewMemTombstones()); err != nil {
		return errors.Wrap(err, "write new tombstones file")
	}

	df, err := fileutil.OpenDir(tmp)
	if err != nil {
		return errors.Wrap(err, "open temporary block dir")
	}
	defer func() {
		if df != nil {
			df.Close()
		}
	}()

	if err := fileutil.Fsync(df); err != nil {
		return errors.Wrap(err, "sync temporary dir file")
	}

	// Close the temporary dir before renaming the block dir (needed on Windows).
	if err = df.Close(); err != nil {
		return errors.Wrap(err, "close temporary dir")
	}
	df = nil

	// Block successfully written, make it visible and remove old ones.
	if err := renameFile(tmp, dir); err != nil {
		return errors.Wrap(err, "rename block dir")
	}

	return nil
}

// populateBlock fills the index and chunk writers with new data gathered as the union
// of the provided blocks. It updates the given meta with statistics for the new block.
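// Series from all blocks are merged in label-sorted order by chaining pairwise
// compactionMerger instances over the per-block compactionSeriesSets.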
func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) error {
	var (
		set        ChunkSeriesSet
		allSymbols = make(map[string]struct{}, 1<<16)
		closers    = []io.Closer{}
	)
	defer func() { closeAll(closers...) }()

	for i, b := range blocks {
		indexr, err := b.Index()
		if err != nil {
			return errors.Wrapf(err, "open index reader for block %s", b)
		}
		closers = append(closers, indexr)

		chunkr, err := b.Chunks()
		if err != nil {
			return errors.Wrapf(err, "open chunk reader for block %s", b)
		}
		closers = append(closers, chunkr)

		tombsr, err := b.Tombstones()
		if err != nil {
			return errors.Wrapf(err, "open tombstone reader for block %s", b)
		}
		closers = append(closers, tombsr)

		symbols, err := indexr.Symbols()
		if err != nil {
			return errors.Wrap(err, "read symbols")
		}
		for s := range symbols {
			allSymbols[s] = struct{}{}
		}

		all, err := indexr.Postings(index.AllPostingsKey())
		if err != nil {
			return err
		}
		all = indexr.SortedPostings(all)

		s := newCompactionSeriesSet(indexr, chunkr, tombsr, all)

		if i == 0 {
			set = s
			continue
		}
		set, err = newCompactionMerger(set, s)
		if err != nil {
			return err
		}
	}

	// We fully rebuild the postings list index from merged series.
	var (
		postings = index.NewMemPostings()
		values   = map[string]stringset{}
		i        = uint64(0)
	)

	if err := indexw.AddSymbols(allSymbols); err != nil {
		return errors.Wrap(err, "add symbols")
	}

	for set.Next() {
		lset, chks, dranges := set.At() // The chunks here are not fully deleted.

		// Skip the series with all deleted chunks.
		if len(chks) == 0 {
			continue
		}

		if len(dranges) > 0 {
			// Re-encode the chunks so they no longer contain deleted samples.
			for i, chk := range chks {
				if !chk.OverlapsClosedInterval(dranges[0].Mint, dranges[len(dranges)-1].Maxt) {
					continue
				}

				newChunk := chunkenc.NewXORChunk()
				app, err := newChunk.Appender()
				if err != nil {
					return err
				}

				it := &deletedIterator{it: chk.Chunk.Iterator(), intervals: dranges}
				for it.Next() {
					ts, v := it.At()
					app.Append(ts, v)
				}

				chks[i].Chunk = newChunk
			}
		}
		if err := chunkw.WriteChunks(chks...); err != nil {
			return errors.Wrap(err, "write chunks")
		}

		if err := indexw.AddSeries(i, lset, chks...); err != nil {
			return errors.Wrap(err, "add series")
		}

		meta.Stats.NumChunks += uint64(len(chks))
		meta.Stats.NumSeries++
		for _, chk := range chks {
			meta.Stats.NumSamples += uint64(chk.Chunk.NumSamples())
		}

		for _, chk := range chks {
			c.chunkPool.Put(chk.Chunk)
		}

		for _, l := range lset {
			valset, ok := values[l.Name]
			if !ok {
				valset = stringset{}
				values[l.Name] = valset
			}
			valset.set(l.Value)
		}
		postings.Add(i, lset)

		i++
	}
	if set.Err() != nil {
		return errors.Wrap(set.Err(), "iterate compaction set")
	}

	s := make([]string, 0, 256)
	for n, v := range values {
		s = s[:0]

		for x := range v {
			s = append(s, x)
		}
		if err := indexw.WriteLabelIndex([]string{n}, s); err != nil {
			return errors.Wrap(err, "write label index")
		}
	}

	for _, l := range postings.SortedKeys() {
		if err := indexw.WritePostings(l.Name, l.Value, postings.Get(l.Name, l.Value)); err != nil {
			return errors.Wrap(err, "write postings")
		}
	}
	return nil
}

type compactionSeriesSet struct {
	p          index.Postings
	index      IndexReader
	chunks     ChunkReader
	tombstones TombstoneReader

	l         labels.Labels
	c         []chunks.Meta
	intervals Intervals
	err       error
}

func newCompactionSeriesSet(i IndexReader, c ChunkReader, t TombstoneReader, p index.Postings) *compactionSeriesSet {
	return &compactionSeriesSet{
		index:      i,
		chunks:     c,
		tombstones: t,
		p:          p,
	}
}

func (c *compactionSeriesSet) Next() bool {
	if !c.p.Next() {
		return false
	}
	var err error

	c.intervals, err = c.tombstones.Get(c.p.At())
	if err != nil {
		c.err = errors.Wrap(err, "get tombstones")
		return false
	}

	if err = c.index.Series(c.p.At(), &c.l, &c.c); err != nil {
		c.err = errors.Wrapf(err, "get series %d", c.p.At())
		return false
	}

	// Remove completely deleted chunks.
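	// A chunk is dropped only when its whole [MinTime, MaxTime] range is covered
	// by deletion intervals; partially deleted chunks are re-encoded later in
	// populateBlock instead.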
	if len(c.intervals) > 0 {
		chks := make([]chunks.Meta, 0, len(c.c))
		for _, chk := range c.c {
			if !(Interval{chk.MinTime, chk.MaxTime}.isSubrange(c.intervals)) {
				chks = append(chks, chk)
			}
		}

		c.c = chks
	}

	for i := range c.c {
		chk := &c.c[i]

		chk.Chunk, err = c.chunks.Chunk(chk.Ref)
		if err != nil {
			c.err = errors.Wrapf(err, "chunk %d not found", chk.Ref)
			return false
		}
	}

	return true
}

func (c *compactionSeriesSet) Err() error {
	if c.err != nil {
		return c.err
	}
	return c.p.Err()
}

func (c *compactionSeriesSet) At() (labels.Labels, []chunks.Meta, Intervals) {
	return c.l, c.c, c.intervals
}

type compactionMerger struct {
	a, b ChunkSeriesSet

	aok, bok  bool
	l         labels.Labels
	c         []chunks.Meta
	intervals Intervals
}

func newCompactionMerger(a, b ChunkSeriesSet) (*compactionMerger, error) {
	c := &compactionMerger{
		a: a,
		b: b,
	}
	// Initialize first elements of both sets as Next() needs
	// one element look-ahead.
	c.aok = c.a.Next()
	c.bok = c.b.Next()

	return c, c.Err()
}

func (c *compactionMerger) compare() int {
	if !c.aok {
		return 1
	}
	if !c.bok {
		return -1
	}
	a, _, _ := c.a.At()
	b, _, _ := c.b.At()
	return labels.Compare(a, b)
}

func (c *compactionMerger) Next() bool {
	if !c.aok && !c.bok || c.Err() != nil {
		return false
	}
	// While advancing child iterators the memory used for labels and chunks
	// may be reused. When picking a series we have to store the result.
	var lset labels.Labels
	var chks []chunks.Meta

	d := c.compare()
	if d > 0 {
		lset, chks, c.intervals = c.b.At()
		c.l = append(c.l[:0], lset...)
		c.c = append(c.c[:0], chks...)

		c.bok = c.b.Next()
	} else if d < 0 {
		lset, chks, c.intervals = c.a.At()
		c.l = append(c.l[:0], lset...)
		c.c = append(c.c[:0], chks...)

		c.aok = c.a.Next()
	} else {
		// Both sets contain the current series. Chain them into a single one.
		l, ca, ra := c.a.At()
		_, cb, rb := c.b.At()
		for _, r := range rb {
			ra = ra.add(r)
		}

		c.l = append(c.l[:0], l...)
		c.c = append(append(c.c[:0], ca...), cb...)
		c.intervals = ra

		c.aok = c.a.Next()
		c.bok = c.b.Next()
	}

	return true
}

func (c *compactionMerger) Err() error {
	if c.a.Err() != nil {
		return c.a.Err()
	}
	return c.b.Err()
}

func (c *compactionMerger) At() (labels.Labels, []chunks.Meta, Intervals) {
	return c.l, c.c, c.intervals
}

func renameFile(from, to string) error {
	if err := os.RemoveAll(to); err != nil {
		return err
	}
	if err := os.Rename(from, to); err != nil {
		return err
	}

	// Directory was renamed; sync parent dir to persist rename.
	pdir, err := fileutil.OpenDir(filepath.Dir(to))
	if err != nil {
		return err
	}

	if err = fileutil.Fsync(pdir); err != nil {
		pdir.Close()
		return err
	}
	return pdir.Close()
}