vendor: bump TSDB
This commit is contained in:
parent 66b8bdbf4a
commit 63f31bd33d
vendor/github.com/prometheus/tsdb/block.go (generated, vendored): 6 changed lines
@@ -195,7 +195,7 @@ func readMetaFile(dir string) (*BlockMeta, error) {
 	if err := json.Unmarshal(b, &m); err != nil {
 		return nil, err
 	}
-	if m.Version != 1 && m.Version != 2 {
+	if m.Version != 1 {
 		return nil, errors.Errorf("unexpected meta file version %d", m.Version)
 	}
 
@@ -203,6 +203,8 @@ func readMetaFile(dir string) (*BlockMeta, error) {
 }
 
 func writeMetaFile(dir string, meta *BlockMeta) error {
+	meta.Version = 1
+
 	// Make any changes to the file appear atomic.
 	path := filepath.Join(dir, metaFilename)
 	tmp := path + ".tmp"
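The new writeMetaFile preamble pins the on-disk meta.json back to version 1, and the surrounding tmp-plus-rename logic is what makes the update atomic. A minimal standalone sketch of that write pattern follows; the helper name and simplified error handling are illustrative, not the vendored code, which additionally fsyncs before renaming.

package main

import (
	"io/ioutil"
	"os"
	"path/filepath"
)

// writeFileAtomic sketches the pattern used by writeMetaFile: write to a
// temporary file, then rename it over the target so readers never observe
// a partially written meta.json.
func writeFileAtomic(dir, name string, data []byte) error {
	path := filepath.Join(dir, name)
	tmp := path + ".tmp"

	if err := ioutil.WriteFile(tmp, data, 0666); err != nil {
		return err
	}
	// On POSIX systems rename is atomic, so the file at `path` is always
	// either the complete old content or the complete new content.
	return os.Rename(tmp, path)
}

func main() {
	if err := writeFileAtomic(".", "meta.json", []byte(`{"version":1}`)); err != nil {
		os.Exit(1)
	}
}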
@@ -253,7 +255,7 @@ func OpenBlock(dir string, pool chunkenc.Pool) (*Block, error) {
 	if err != nil {
 		return nil, err
 	}
-	ir, err := index.NewFileReader(filepath.Join(dir, "index"), meta.Version)
+	ir, err := index.NewFileReader(filepath.Join(dir, "index"))
 	if err != nil {
 		return nil, err
 	}
vendor/github.com/prometheus/tsdb/chunkenc/xor.go (generated, vendored): 2 changed lines
@@ -89,7 +89,6 @@ func (c *XORChunk) Appender() (Appender, error) {
 	}
 
 	a := &xorAppender{
-		c: c,
 		b: c.b,
 		t: it.t,
 		v: it.val,
@@ -119,7 +118,6 @@ func (c *XORChunk) Iterator() Iterator {
 }
 
 type xorAppender struct {
-	c *XORChunk
 	b *bstream
 
 	t int64
vendor/github.com/prometheus/tsdb/compact.go (generated, vendored): 5 changed lines
@@ -91,8 +91,8 @@ func newCompactorMetrics(r prometheus.Registerer) *compactorMetrics {
 		Help: "Total number of compactions that failed for the partition.",
 	})
 	m.duration = prometheus.NewHistogram(prometheus.HistogramOpts{
-		Name:    "prometheus_tsdb_compaction_duration",
-		Help:    "Duration of compaction runs.",
+		Name:    "prometheus_tsdb_compaction_duration_seconds",
+		Help:    "Duration of compaction runs",
 		Buckets: prometheus.ExponentialBuckets(1, 2, 10),
 	})
 	m.chunkSize = prometheus.NewHistogram(prometheus.HistogramOpts{
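The renamed histogram keeps its bucket layout: prometheus.ExponentialBuckets(1, 2, 10) yields ten upper bounds doubling from one up to 512, now explicitly in seconds per the new metric name. A quick sketch to confirm, assuming only the real client_golang package:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Prints the upper bounds of the compaction duration histogram:
	// [1 2 4 8 16 32 64 128 256 512]
	fmt.Println(prometheus.ExponentialBuckets(1, 2, 10))
}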
@@ -431,7 +431,6 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe
 	if err != nil {
 		return errors.Wrap(err, "open index writer")
 	}
-	meta.Version = indexw.Version
 
 	if err := c.populateBlock(blocks, meta, indexw, chunkw); err != nil {
 		return errors.Wrap(err, "write compaction")
vendor/github.com/prometheus/tsdb/db.go (generated, vendored): 28 changed lines
@@ -122,6 +122,8 @@ type dbMetrics struct {
 	reloads              prometheus.Counter
 	reloadsFailed        prometheus.Counter
 	compactionsTriggered prometheus.Counter
+	cutoffs              prometheus.Counter
+	cutoffsFailed        prometheus.Counter
 	tombCleanTimer       prometheus.Histogram
 }
 
@@ -148,6 +150,14 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
 		Name: "prometheus_tsdb_compactions_triggered_total",
 		Help: "Total number of triggered compactions for the partition.",
 	})
+	m.cutoffs = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "prometheus_tsdb_retention_cutoffs_total",
+		Help: "Number of times the database cut off block data from disk.",
+	})
+	m.cutoffsFailed = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "prometheus_tsdb_retention_cutoffs_failures_total",
+		Help: "Number of times the database failed to cut off block data from disk.",
+	})
 	m.tombCleanTimer = prometheus.NewHistogram(prometheus.HistogramOpts{
 		Name: "prometheus_tsdb_tombstone_cleanup_seconds",
 		Help: "The time taken to recompact blocks to remove tombstones.",
@@ -158,6 +168,8 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
 			m.loadedBlocks,
 			m.reloads,
 			m.reloadsFailed,
+			m.cutoffs,
+			m.cutoffsFailed,
 			m.compactionsTriggered,
 			m.tombCleanTimer,
 		)
@@ -176,6 +188,10 @@ func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options) (db
 	if opts == nil {
 		opts = DefaultOptions
 	}
+	// Fixup bad format written by Prometheus 2.1.
+	if err := repairBadIndexVersion(l, dir); err != nil {
+		return nil, err
+	}
 
 	db = &DB{
 		dir: dir,
@@ -277,7 +293,17 @@ func (db *DB) run() {
 		}
 	}
 
-func (db *DB) retentionCutoff() (bool, error) {
+func (db *DB) retentionCutoff() (b bool, err error) {
+	defer func() {
+		if !b && err == nil {
+			// no data had to be cut off.
+			return
+		}
+		db.metrics.cutoffs.Inc()
+		if err != nil {
+			db.metrics.cutoffsFailed.Inc()
+		}
+	}()
 	if db.opts.RetentionDuration == 0 {
 		return false, nil
 	}
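Switching retentionCutoff to named return values is what lets the deferred closure observe the final outcome and bump the right counter exactly once per call. A condensed sketch of the same instrumentation shape, with hypothetical counters standing in for db.metrics.cutoffs and db.metrics.cutoffsFailed:

package main

import "github.com/prometheus/client_golang/prometheus"

// Hypothetical counters for this sketch.
var (
	cutoffs = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_cutoffs_total",
		Help: "Number of attempted cutoffs.",
	})
	cutoffsFailed = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_cutoffs_failures_total",
		Help: "Number of failed cutoffs.",
	})
)

// doCutoff mirrors the shape of retentionCutoff: the deferred closure
// reads the named results after the function body has run, so it can
// count a success or a failure without touching every return site.
func doCutoff(work func() (bool, error)) (b bool, err error) {
	defer func() {
		if !b && err == nil {
			return // nothing was cut off and nothing failed
		}
		cutoffs.Inc()
		if err != nil {
			cutoffsFailed.Inc()
		}
	}()
	return work()
}

func main() {
	prometheus.MustRegister(cutoffs, cutoffsFailed)
	doCutoff(func() (bool, error) { return true, nil })
}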
vendor/github.com/prometheus/tsdb/index/index.go (generated, vendored): 46 changed lines
@@ -37,6 +37,7 @@ const (
 	MagicIndex = 0xBAAAD700
 
 	indexFormatV1 = 1
+	indexFormatV2 = 2
 )
 
 type indexWriterSeries struct {
@@ -135,7 +136,7 @@ type indexTOC struct {
 	postingsTable uint64
 }
 
-// NewWriter returns a new Writer to the given filename.
+// NewWriter returns a new Writer to the given filename. It serializes data in format version 2.
 func NewWriter(fn string) (*Writer, error) {
 	dir := filepath.Dir(fn)
 
@@ -168,8 +169,6 @@ func NewWriter(fn string) (*Writer, error) {
 		symbols:       make(map[string]uint32, 1<<13),
 		seriesOffsets: make(map[uint64]uint64, 1<<16),
 		crc32:         newCRC32(),
-
-		Version: 2,
 	}
 	if err := iw.writeMeta(); err != nil {
 		return nil, err
@@ -195,7 +194,7 @@ func (w *Writer) write(bufs ...[]byte) error {
 	return nil
 }
 
-// addPadding adds zero byte padding until the file size is a multiple size_unit.
+// addPadding adds zero byte padding until the file size is a multiple size.
 func (w *Writer) addPadding(size int) error {
 	p := w.pos % uint64(size)
 	if p == 0 {
@@ -249,7 +248,7 @@ func (w *Writer) ensureStage(s indexWriterStage) error {
 func (w *Writer) writeMeta() error {
 	w.buf1.reset()
 	w.buf1.putBE32(MagicIndex)
-	w.buf1.putByte(indexFormatV1)
+	w.buf1.putByte(indexFormatV2)
 
 	return w.write(w.buf1.get())
 }
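After this change the writer always emits format version 2. The header writeMeta produces is five bytes: a big-endian magic number followed by a single version byte. A rough sketch of that layout using plain encoding/binary rather than the writer's internal buffer helpers:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const magicIndex = 0xBAAAD700

// writeHeader sketches the 5-byte index header: 4 bytes of big-endian
// magic, then one format version byte.
func writeHeader(version byte) []byte {
	var buf bytes.Buffer
	binary.Write(&buf, binary.BigEndian, uint32(magicIndex))
	buf.WriteByte(version)
	return buf.Bytes()
}

func main() {
	fmt.Printf("% x\n", writeHeader(2)) // ba aa d7 00 02
}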
@@ -266,7 +265,13 @@ func (w *Writer) AddSeries(ref uint64, lset labels.Labels, chunks ...chunks.Meta
 	if _, ok := w.seriesOffsets[ref]; ok {
 		return errors.Errorf("series with reference %d already added", ref)
 	}
+	// We add padding to 16 bytes to increase the addressable space we get through 4 byte
+	// series references.
 	w.addPadding(16)
+
+	if w.pos%16 != 0 {
+		return errors.Errorf("series write not 16-byte aligned at %d", w.pos)
+	}
 	w.seriesOffsets[ref] = w.pos / 16
 
 	w.buf2.reset()
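The padding comment is worth unpacking: a 4-byte series reference can distinguish 2^32 values, so storing pos/16 instead of the raw byte position stretches the index size a reference can address from 4 GiB to 64 GiB. The arithmetic, as a sketch:

package main

import "fmt"

func main() {
	// A 4-byte reference distinguishes 2^32 values. With series 16-byte
	// aligned and the stored value being pos/16, the maximum addressable
	// byte position grows by a factor of 16.
	const maxRefs int64 = 1 << 32
	fmt.Printf("unpadded:       %d GiB\n", maxRefs/(1<<30))    // 4
	fmt.Printf("16-byte padded: %d GiB\n", maxRefs*16/(1<<30)) // 64
}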
@@ -572,25 +577,22 @@ func (b realByteSlice) Sub(start, end int) ByteSlice {
 	return b[start:end]
 }
 
-// NewReader returns a new IndexReader on the given byte slice.
-func NewReader(b ByteSlice, version int) (*Reader, error) {
-	return newReader(b, nil, version)
+// NewReader returns a new IndexReader on the given byte slice. It automatically
+// handles different format versions.
+func NewReader(b ByteSlice) (*Reader, error) {
+	return newReader(b, nil)
 }
 
 // NewFileReader returns a new index reader against the given index file.
-func NewFileReader(path string, version int) (*Reader, error) {
+func NewFileReader(path string) (*Reader, error) {
 	f, err := fileutil.OpenMmapFile(path)
 	if err != nil {
 		return nil, err
 	}
-	return newReader(realByteSlice(f.Bytes()), f, version)
+	return newReader(realByteSlice(f.Bytes()), f)
 }
 
-func newReader(b ByteSlice, c io.Closer, version int) (*Reader, error) {
-	if version != 1 && version != 2 {
-		return nil, errors.Errorf("unexpected file version %d", version)
-	}
-
+func newReader(b ByteSlice, c io.Closer) (*Reader, error) {
 	r := &Reader{
 		b: b,
 		c: c,
@@ -598,16 +600,20 @@ func newReader(b ByteSlice, c io.Closer, version int) (*Reader, error) {
 		labels:   map[string]uint32{},
 		postings: map[labels.Label]uint32{},
 		crc32:    newCRC32(),
-		version:  version,
 	}
 
-	// Verify magic number.
-	if b.Len() < 4 {
+	// Verify header.
+	if b.Len() < 5 {
 		return nil, errors.Wrap(errInvalidSize, "index header")
 	}
 	if m := binary.BigEndian.Uint32(r.b.Range(0, 4)); m != MagicIndex {
 		return nil, errors.Errorf("invalid magic number %x", m)
 	}
+	r.version = int(r.b.Range(4, 5)[0])
+
+	if r.version != 1 && r.version != 2 {
+		return nil, errors.Errorf("unknown index file version %d", r.version)
+	}
 
 	if err := r.readTOC(); err != nil {
 		return nil, errors.Wrap(err, "read TOC")
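The reader now derives the version from the file itself instead of trusting whatever meta.json claimed. A standalone sketch mirroring the checks newReader performs, not the vendored reader itself:

package main

import (
	"encoding/binary"
	"fmt"
)

const magicIndex = 0xBAAAD700

// parseHeader mirrors newReader's checks: at least 5 bytes, a valid
// magic number, and a version byte of 1 or 2.
func parseHeader(b []byte) (version int, err error) {
	if len(b) < 5 {
		return 0, fmt.Errorf("invalid size %d for index header", len(b))
	}
	if m := binary.BigEndian.Uint32(b[:4]); m != magicIndex {
		return 0, fmt.Errorf("invalid magic number %x", m)
	}
	version = int(b[4])
	if version != 1 && version != 2 {
		return 0, fmt.Errorf("unknown index file version %d", version)
	}
	return version, nil
}

func main() {
	v, err := parseHeader([]byte{0xba, 0xaa, 0xd7, 0x00, 0x02})
	fmt.Println(v, err) // 2 <nil>
}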
@@ -880,8 +886,10 @@ func (r *Reader) LabelIndices() ([][]string, error) {
 // Series reads the series with the given ID and writes its labels and chunks into lbls and chks.
 func (r *Reader) Series(id uint64, lbls *labels.Labels, chks *[]chunks.Meta) error {
 	offset := id
+	// In version 2 series IDs are no longer exact references but series are 16-byte padded
+	// and the ID is the multiple of 16 of the actual position.
 	if r.version == 2 {
-		offset = 16 * id
+		offset = id * 16
 	}
 	d := r.decbufUvarintAt(int(offset))
 	if d.err() != nil {
vendor/github.com/prometheus/tsdb/repair.go (generated, vendored, new file): 90 added lines
@@ -0,0 +1,90 @@
+package tsdb
+
+import (
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/oklog/ulid"
+	"github.com/pkg/errors"
+	"github.com/prometheus/tsdb/fileutil"
+)
+
+// repairBadIndexVersion repairs an issue in index and meta.json persistence introduced in
+// commit 129773b41a565fde5156301e37f9a87158030443.
+func repairBadIndexVersion(logger log.Logger, dir string) error {
+	// All blocks written by Prometheus 2.1 with a meta.json version of 2 are affected.
+	// We must actually set the index file version to 2 and revert the meta.json version back to 1.
+	subdirs, err := fileutil.ReadDir(dir)
+	if err != nil {
+		return err
+	}
+	for _, d := range subdirs {
+		// Skip non-block dirs.
+		if _, err := ulid.Parse(d); err != nil {
+			continue
+		}
+		d = path.Join(dir, d)
+
+		meta, err := readBogusMetaFile(d)
+		if err != nil {
+			return err
+		}
+		if meta.Version == 1 {
+			continue
+		}
+		level.Info(logger).Log("msg", "fixing broken block", "ulid", meta.ULID)
+
+		repl, err := os.Create(filepath.Join(d, "index.repaired"))
+		if err != nil {
+			return err
+		}
+		broken, err := os.Open(filepath.Join(d, "index"))
+		if err != nil {
+			return err
+		}
+		if _, err := io.Copy(repl, broken); err != nil {
+			return err
+		}
+		// Set the 5th byte to 2 to indicate the correct file format version.
+		if _, err := repl.WriteAt([]byte{2}, 4); err != nil {
+			return err
+		}
+		if err := fileutil.Fsync(repl); err != nil {
+			return err
+		}
+		if err := repl.Close(); err != nil {
+			return err
+		}
+		if err := renameFile(repl.Name(), broken.Name()); err != nil {
+			return err
+		}
+		// Reset version of meta.json to 1.
+		meta.Version = 1
+		if err := writeMetaFile(d, meta); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func readBogusMetaFile(dir string) (*BlockMeta, error) {
+	b, err := ioutil.ReadFile(filepath.Join(dir, metaFilename))
+	if err != nil {
+		return nil, err
+	}
+	var m BlockMeta
+
+	if err := json.Unmarshal(b, &m); err != nil {
+		return nil, err
+	}
+	if m.Version != 1 && m.Version != 2 {
+		return nil, errors.Errorf("unexpected meta file version %d", m.Version)
+	}
+	return &m, nil
+}
vendor/vendor.json (vendored): 30 changed lines
@@ -800,40 +800,40 @@
 			"revisionTime": "2016-04-11T19:08:41Z"
 		},
 		{
-			"checksumSHA1": "5mCM640B2xa7y+kRRUeVCglEk7o=",
+			"checksumSHA1": "CeD8QwiLL5CBkWMOfbaJxs4AFuM=",
 			"path": "github.com/prometheus/tsdb",
-			"revision": "44dd5e1202b7598d50c69ce3617ca6ae6503cf52",
-			"revisionTime": "2018-01-26T14:54:38Z"
+			"revision": "bc49a665d16326ad6ad078b97e0f903d894f80f8",
+			"revisionTime": "2018-02-12T10:58:46Z"
 		},
 		{
-			"checksumSHA1": "XTirmk6Pq5TBGIZEaN5VL4k3i1s=",
+			"checksumSHA1": "4X26TfLh8M4WuCFo/5a+Qk+ieSw=",
 			"path": "github.com/prometheus/tsdb/chunkenc",
-			"revision": "467948f3c3f2f6f4ed9881afba27dd3ae24393eb",
-			"revisionTime": "2018-01-18T08:32:54Z"
+			"revision": "bc49a665d16326ad6ad078b97e0f903d894f80f8",
+			"revisionTime": "2018-02-12T10:58:46Z"
 		},
 		{
 			"checksumSHA1": "+zsn1i8cqwgZXL8Bg6jDy32xjAo=",
 			"path": "github.com/prometheus/tsdb/chunks",
-			"revision": "467948f3c3f2f6f4ed9881afba27dd3ae24393eb",
-			"revisionTime": "2018-01-18T08:32:54Z"
+			"revision": "bc49a665d16326ad6ad078b97e0f903d894f80f8",
+			"revisionTime": "2018-02-12T10:58:46Z"
 		},
 		{
 			"checksumSHA1": "h49AAcJ5+iRBwCgbfQf+2T1E1ZE=",
 			"path": "github.com/prometheus/tsdb/fileutil",
-			"revision": "467948f3c3f2f6f4ed9881afba27dd3ae24393eb",
-			"revisionTime": "2018-01-18T08:32:54Z"
+			"revision": "bc49a665d16326ad6ad078b97e0f903d894f80f8",
+			"revisionTime": "2018-02-12T10:58:46Z"
 		},
 		{
-			"checksumSHA1": "yuiJaE3cAmZ/ws8NOfd56x36Sg4=",
+			"checksumSHA1": "Cpumd1FEx22Kz8I3HrhMPrSRPNA=",
 			"path": "github.com/prometheus/tsdb/index",
-			"revision": "467948f3c3f2f6f4ed9881afba27dd3ae24393eb",
-			"revisionTime": "2018-01-18T08:32:54Z"
+			"revision": "bc49a665d16326ad6ad078b97e0f903d894f80f8",
+			"revisionTime": "2018-02-12T10:58:46Z"
 		},
 		{
 			"checksumSHA1": "Va8HWvOFTwFeewZFadMAOzNGDps=",
 			"path": "github.com/prometheus/tsdb/labels",
-			"revision": "467948f3c3f2f6f4ed9881afba27dd3ae24393eb",
-			"revisionTime": "2018-01-18T08:32:54Z"
+			"revision": "bc49a665d16326ad6ad078b97e0f903d894f80f8",
+			"revisionTime": "2018-02-12T10:58:46Z"
 		},
 		{
 			"checksumSHA1": "5SYLEhADhdBVZAGPVHWggQl7H8k=",