vendor: update tsdb

Fabian Reinartz 2018-03-08 10:49:35 +01:00
parent 1f71caf23f
commit 07299cec17
4 changed files with 19 additions and 12 deletions

compact.go

@@ -151,6 +151,13 @@ func (c *LeveledCompactor) Plan(dir string) ([]string, error) {
 	if err != nil {
 		return nil, err
 	}
+	// We do not include the most recently created block. This gives users a window
+	// of a full block size to piece-wise backup new data without having to care
+	// about data overlap.
+	if len(dirs) < 1 {
+		return nil, nil
+	}
+	dirs = dirs[:len(dirs)-1]
 
 	var dms []dirMeta
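The added logic is self-contained enough to sketch in isolation. A minimal standalone version of the selection step (hypothetical helper name and example directory names; the vendored code operates on block directories inside Plan):

package main

import "fmt"

// planCandidates mirrors the hunk above (hypothetical helper, not the
// vendored code): drop the newest block directory from the compaction
// candidates, so backup tooling gets a full block's worth of time to
// copy it before compaction can merge it away.
func planCandidates(dirs []string) []string {
	if len(dirs) < 1 {
		return nil
	}
	return dirs[:len(dirs)-1]
}

func main() {
	dirs := []string{"01BKG", "01BKH", "01BKJ"} // sorted oldest to newest
	fmt.Println(planCandidates(dirs))           // [01BKG 01BKH], newest excluded
}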

db.go

@@ -42,7 +42,7 @@ import (
 )
 
 // DefaultOptions used for the DB. They are sane for setups using
-// millisecond precision timestampdb.
+// millisecond precision timestamps.
 var DefaultOptions = &Options{
 	WALFlushInterval:  5 * time.Second,
 	RetentionDuration: 15 * 24 * 60 * 60 * 1000, // 15 days in milliseconds
@@ -633,8 +633,9 @@ func (db *DB) EnableCompactions() {
 	level.Info(db.logger).Log("msg", "compactions enabled")
 }
 
-// Snapshot writes the current data to the directory.
-func (db *DB) Snapshot(dir string) error {
+// Snapshot writes the current data to the directory. If withHead is set to true it
+// will create a new block containing all data that's currently in the memory buffer/WAL.
+func (db *DB) Snapshot(dir string, withHead bool) error {
 	if dir == db.dir {
 		return errors.Errorf("cannot snapshot into base directory")
 	}
@@ -655,6 +656,9 @@ func (db *DB) Snapshot(dir string) error {
 			return errors.Wrapf(err, "error snapshotting block: %s", b.Dir())
 		}
 	}
+	if !withHead {
+		return nil
+	}
 	_, err := db.compactor.Write(dir, db.head, db.head.MinTime(), db.head.MaxTime())
 	return errors.Wrap(err, "snapshot head block")
 }
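Since the Snapshot signature changed, downstream callers of the vendored package need updating. A minimal sketch of a caller under the new API, assuming this vendored revision's Open(dir, logger, registerer, opts) signature and that nil logger/registerer fall back to no-ops (paths are placeholders):

package main

import (
	"log"

	"github.com/prometheus/tsdb"
)

func main() {
	// Assumed: tsdb.Open as in this vendored revision, nil logger and
	// registerer accepted.
	db, err := tsdb.Open("data", nil, nil, tsdb.DefaultOptions)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// withHead=true additionally writes the in-memory head (buffer/WAL
	// data) out as a block; false snapshots only the persisted blocks.
	if err := db.Snapshot("backup", true); err != nil {
		log.Fatal(err)
	}
}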

head.go

@@ -265,7 +265,7 @@ func (h *Head) ReadWAL() error {
 	// TODO(fabxc): series entries spread between samples can starve the sample workers.
 	// Even with bufferd channels, this can impact startup time with lots of series churn.
-	// We must not pralellize series creation itself but could make the indexing asynchronous.
+	// We must not paralellize series creation itself but could make the indexing asynchronous.
 	seriesFunc := func(series []RefSeries) {
 		for _, s := range series {
 			h.getOrCreateWithID(s.Ref, s.Labels.Hash(), s.Labels)
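For context, this hunk sits in WAL replay: the reader streams decoded series records into seriesFunc, which recreates in-memory series one at a time. A rough sketch of that shape, with hypothetical types standing in for the vendored RefSeries and head:

package sketch

// refSeries stands in for the vendored RefSeries record type (hypothetical).
type refSeries struct {
	Ref    uint64
	Labels map[string]string
}

// replaySeries mirrors the shape of seriesFunc above: records are applied
// strictly in order, since series creation itself must stay sequential;
// per the TODO, only the indexing step is a candidate for going async.
func replaySeries(batch []refSeries, createWithID func(ref uint64, labels map[string]string)) {
	for _, s := range batch {
		createWithID(s.Ref, s.Labels)
	}
}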
@@ -762,10 +762,6 @@ func (c *safeChunk) Iterator() chunkenc.Iterator {
 	return it
 }
 
-// func (c *safeChunk) Appender() (chunks.Appender, error) { panic("illegal") }
-// func (c *safeChunk) Bytes() []byte { panic("illegal") }
-// func (c *safeChunk) Encoding() chunks.Encoding { panic("illegal") }
-
 type headIndexReader struct {
 	head *Head
 	mint, maxt int64
@@ -1259,7 +1255,7 @@ func (s *memSeries) iterator(id int) chunkenc.Iterator {
 	if id-s.firstChunkID < len(s.chunks)-1 {
 		return c.chunk.Iterator()
 	}
-	// Serve the last 4 samples for the last chunk from the series buffer
+	// Serve the last 4 samples for the last chunk from the sample buffer
 	// as their compressed bytes may be mutated by added samples.
 	it := &memSafeIterator{
 		Iterator: c.chunk.Iterator(),
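The corrected comment explains why the head needs a wrapping iterator at all. A simplified sketch of the pattern (hypothetical names; the vendored type is memSafeIterator): positions in the immutable prefix are delegated to the compressed chunk's iterator, while the trailing four samples come from an uncompressed side buffer, because concurrent appends may still rewrite the chunk's final bytes.

package sketch

type sample struct {
	t int64
	v float64
}

// sampleIter is the subset of the chunk iterator interface used here.
type sampleIter interface {
	Next() bool
	At() (int64, float64)
}

// safeIterator sketches the pattern described above (hypothetical name,
// not the vendored memSafeIterator). i starts at -1; total is the number
// of samples in the chunk; buf holds the last four appended samples.
type safeIterator struct {
	inner sampleIter
	i     int
	total int
	buf   [4]sample
}

func (it *safeIterator) Next() bool {
	if it.i+1 >= it.total {
		return false
	}
	it.i++
	if it.total-it.i > 4 {
		return it.inner.Next() // still in the immutable compressed prefix
	}
	return true // trailing samples are served from buf
}

func (it *safeIterator) At() (int64, float64) {
	if it.total-it.i > 4 {
		return it.inner.At()
	}
	s := it.buf[4-(it.total-it.i)]
	return s.t, s.v
}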

vendor/vendor.json

@@ -800,10 +800,10 @@
 			"revisionTime": "2016-04-11T19:08:41Z"
 		},
 		{
-			"checksumSHA1": "CeD8QwiLL5CBkWMOfbaJxs4AFuM=",
+			"checksumSHA1": "zVgXlbZ1J8GhBN7tZji7M/SuiAU=",
 			"path": "github.com/prometheus/tsdb",
-			"revision": "494acd307058387ced7646f9996b0f7372eaa558",
-			"revisionTime": "2018-02-15T11:29:47Z"
+			"revision": "16b2bf1b45ce3e3536c78ebec5116ea09a69786e",
+			"revisionTime": "2018-03-02T11:51:49Z"
 		},
 		{
 			"checksumSHA1": "S7F4yWxVLhxQNHMdgoOo6plmOOs=",