Update vendoring of prometheus/tsdb

commit e4167a5ca8
parent 2121b4628b
vendor/github.com/prometheus/tsdb/chunks.go (generated, vendored): 8 changed lines

@@ -170,6 +170,7 @@ func (w *chunkWriter) finalizeTail() error {
     if err := tf.Truncate(off); err != nil {
         return err
     }
+
     return tf.Close()
 }
 
@@ -276,7 +277,12 @@ func (w *chunkWriter) seq() int {
 }
 
 func (w *chunkWriter) Close() error {
-    return w.finalizeTail()
+    if err := w.finalizeTail(); err != nil {
+        return err
+    }
+
+    // close dir file (if not windows platform will fail on rename)
+    return w.dirFile.Close()
 }
 
 // ChunkReader provides reading access of serialized time series data.
vendor/github.com/prometheus/tsdb/compact.go (generated, vendored): 12 changed lines

@@ -426,12 +426,22 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe
     if err != nil {
         return errors.Wrap(err, "open temporary block dir")
     }
-    defer df.Close()
+    defer func() {
+        if df != nil {
+            df.Close()
+        }
+    }()
 
     if err := fileutil.Fsync(df); err != nil {
         return errors.Wrap(err, "sync temporary dir file")
     }
 
+    // close temp dir before rename block dir(for windows platform)
+    if err = df.Close(); err != nil {
+        return errors.Wrap(err, "close temporary dir")
+    }
+    df = nil
+
     // Block successfully written, make visible and remove old ones.
     if err := renameFile(tmp, dir); err != nil {
         return errors.Wrap(err, "rename block dir")
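The compact.go hunk (like the related changes in chunks.go and tombstones.go) applies one idiom: keep a nil-guarded deferred Close as a safety net for error paths, close the handle explicitly before the rename, and set the variable to nil so the deferred close never fires a second time, because Windows refuses to rename a directory or file that still has an open handle. Below is a minimal standalone sketch of that idiom; the helper name writeBlockDir and the use of os.Open/os.Rename in place of the vendored fileutil helpers are illustrative assumptions, not the vendored code.

package main

import (
    "fmt"
    "os"
)

// writeBlockDir mimics the close-before-rename idiom from the hunk above.
// The deferred close only runs on early-error returns; the happy path closes
// the handle itself and disarms the defer by setting df to nil, so the handle
// is never closed twice. (The real code also fsyncs the temporary directory
// before closing it; that step is omitted here.)
func writeBlockDir(tmp, dir string) error {
    df, err := os.Open(tmp) // stand-in for fileutil.OpenDir(tmp)
    if err != nil {
        return fmt.Errorf("open temporary block dir: %w", err)
    }
    defer func() {
        if df != nil {
            df.Close()
        }
    }()

    // Close before renaming; Windows cannot rename a directory that still
    // has an open handle.
    if err := df.Close(); err != nil {
        return fmt.Errorf("close temporary dir: %w", err)
    }
    df = nil

    return os.Rename(tmp, dir)
}

func main() {
    if err := os.Mkdir("block.tmp", 0755); err != nil && !os.IsExist(err) {
        fmt.Println("setup:", err)
        return
    }
    if err := writeBlockDir("block.tmp", "block"); err != nil {
        fmt.Println("error:", err)
    }
}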
vendor/github.com/prometheus/tsdb/db_windows.go (generated, vendored): 3 changed lines

@@ -21,8 +21,7 @@ import (
 
 func mmap(f *os.File, sz int) ([]byte, error) {
     low, high := uint32(sz), uint32(sz>>32)
-
-    h, errno := syscall.CreateFileMapping(syscall.Handle(f.Fd()), nil, syscall.PAGE_READONLY, low, high, nil)
+    h, errno := syscall.CreateFileMapping(syscall.Handle(f.Fd()), nil, syscall.PAGE_READONLY, high, low, nil)
     if h == 0 {
         return nil, os.NewSyscallError("CreateFileMapping", errno)
     }
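The only functional change here is the order of the size arguments: syscall.CreateFileMapping takes the high 32 bits of the maximum mapping size before the low 32 bits, so passing (low, high) requests the wrong size once files grow past 4 GiB. A small portable sketch of the dword split, independent of the Windows API (splitSize is an illustrative helper, not part of the package):

package main

import "fmt"

// splitSize splits a 64-bit mapping size into the 32-bit halves that the
// Windows CreateFileMapping call expects: the HIGH dword is the fourth
// argument and the LOW dword is the fifth.
func splitSize(sz int64) (low, high uint32) {
    return uint32(sz), uint32(sz >> 32)
}

func main() {
    sz := int64(5)<<32 + 123 // a size larger than 4 GiB
    low, high := splitSize(sz)

    // Reassembling in the documented order gives the original size back;
    // swapping the halves (the old vendored call) yields a bogus size.
    correct := int64(high)<<32 | int64(low)
    swapped := int64(low)<<32 | int64(high)
    fmt.Printf("size=%d correct=%d swapped=%d\n", sz, correct, swapped)
}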
vendor/github.com/prometheus/tsdb/head.go (generated, vendored): 2 changed lines

@@ -142,7 +142,7 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics {
     })
     m.samplesAppended = prometheus.NewCounter(prometheus.CounterOpts{
         Name: "prometheus_tsdb_head_samples_appended_total",
-        Help: "Total number of appended sampledb.",
+        Help: "Total number of appended samples.",
     })
 
     if r != nil {
vendor/github.com/prometheus/tsdb/index.go (generated, vendored): 2 changed lines

@@ -153,6 +153,8 @@ func newIndexWriter(dir string) (*indexWriter, error) {
     if err != nil {
         return nil, err
     }
+    defer df.Close() // close for flatform windows
+
     f, err := os.OpenFile(filepath.Join(dir, indexFilename), os.O_CREATE|os.O_WRONLY, 0666)
     if err != nil {
         return nil, err
vendor/github.com/prometheus/tsdb/tombstones.go (generated, vendored): 10 changed lines

@@ -49,7 +49,11 @@ func writeTombstoneFile(dir string, tr tombstoneReader) error {
     if err != nil {
         return err
     }
-    defer f.Close()
+    defer func() {
+        if f != nil {
+            f.Close()
+        }
+    }()
 
     buf := encbuf{b: make([]byte, 3*binary.MaxVarintLen64)}
     buf.reset()
@@ -82,6 +86,10 @@ func writeTombstoneFile(dir string, tr tombstoneReader) error {
         return err
     }
 
+    if err = f.Close(); err != nil {
+        return err
+    }
+    f = nil
     return renameFile(tmp, path)
 }
 
vendor/github.com/prometheus/tsdb/wal.go (generated, vendored): 67 changed lines

@@ -190,6 +190,7 @@ type SegmentWAL struct {
 
     stopc   chan struct{}
     donec   chan struct{}
+    actorc  chan func() error // sequentialized background operations
     buffers sync.Pool
 }
 
@@ -213,6 +214,7 @@ func OpenSegmentWAL(dir string, logger log.Logger, flushInterval time.Duration,
         flushInterval: flushInterval,
         donec:         make(chan struct{}),
         stopc:         make(chan struct{}),
+        actorc:        make(chan func() error, 1),
         segmentSize:   walSegmentSizeBytes,
         crc32:         newCRC32(),
     }
@@ -384,7 +386,7 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(uint64) bool) error {
         w.putBuffer(buf)
 
         if err != nil {
-            return err
+            return errors.Wrap(err, "write to compaction segment")
         }
     }
     if r.Err() != nil {
@@ -401,14 +403,15 @@
     csf.Sync()
     csf.Close()
 
+    candidates[0].Close() // need close before remove on platform windows
     if err := renameFile(csf.Name(), candidates[0].Name()); err != nil {
-        return err
+        return errors.Wrap(err, "rename compaction segment")
     }
     for _, f := range candidates[1:] {
+        f.Close() // need close before remove on platform windows
         if err := os.RemoveAll(f.Name()); err != nil {
             return errors.Wrap(err, "delete WAL segment file")
         }
-        f.Close()
     }
     if err := w.dirFile.Sync(); err != nil {
         return err
@@ -522,6 +525,15 @@ func (w *SegmentWAL) openSegmentFile(name string) (*os.File, error) {
     }
     metab := make([]byte, 8)
 
+    // If there is an error, we need close f for platform windows before gc.
+    // Otherwise, file op may fail.
+    hasError := true
+    defer func() {
+        if hasError {
+            f.Close()
+        }
+    }()
+
     if n, err := f.Read(metab); err != nil {
         return nil, errors.Wrapf(err, "validate meta %q", f.Name())
     } else if n != 8 {
@@ -534,6 +546,7 @@ func (w *SegmentWAL) openSegmentFile(name string) (*os.File, error) {
     if metab[4] != WALFormatDefault {
         return nil, errors.Errorf("unknown WAL segment format %d in %q", metab[4], f.Name())
     }
+    hasError = false
     return f, nil
 }
 
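The two openSegmentFile hunks above add a close-on-error guard: the deferred function closes the file on every error return (so Windows can later remove or rename the segment), while hasError = false on the success path hands the still-open file to the caller. A minimal standalone sketch of that guard, assuming a hypothetical openAndValidate helper rather than the vendored function:

package main

import (
    "fmt"
    "io"
    "os"
)

// openAndValidate sketches the guard: the deferred Close only fires while
// hasError is still true, so every error path releases the handle and the
// success path does not. The validation itself is illustrative.
func openAndValidate(name string) (*os.File, error) {
    f, err := os.Open(name)
    if err != nil {
        return nil, err
    }

    hasError := true
    defer func() {
        if hasError {
            f.Close()
        }
    }()

    metab := make([]byte, 8)
    if _, err := io.ReadFull(f, metab); err != nil {
        return nil, fmt.Errorf("validate meta %q: %w", name, err)
    }

    hasError = false
    return f, nil
}

func main() {
    f, err := openAndValidate("segment-000001")
    if err != nil {
        fmt.Println("open failed:", err)
        return
    }
    defer f.Close()
    fmt.Println("opened", f.Name())
}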
@@ -569,18 +582,21 @@ func (w *SegmentWAL) cut() error {
         // Finish last segment asynchronously to not block the WAL moving along
         // in the new segment.
         go func() {
-            off, err := hf.Seek(0, os.SEEK_CUR)
-            if err != nil {
-                level.Error(w.logger).Log("msg", "finish old segment", "segment", hf.Name(), "err", err)
-            }
-            if err := hf.Truncate(off); err != nil {
-                level.Error(w.logger).Log("msg", "finish old segment", "segment", hf.Name(), "err", err)
-            }
-            if err := hf.Sync(); err != nil {
-                level.Error(w.logger).Log("msg", "finish old segment", "segment", hf.Name(), "err", err)
-            }
-            if err := hf.Close(); err != nil {
-                level.Error(w.logger).Log("msg", "finish old segment", "segment", hf.Name(), "err", err)
-            }
+            w.actorc <- func() error {
+                off, err := hf.Seek(0, os.SEEK_CUR)
+                if err != nil {
+                    return errors.Wrapf(err, "finish old segment %s", hf.Name())
+                }
+                if err := hf.Truncate(off); err != nil {
+                    return errors.Wrapf(err, "finish old segment %s", hf.Name())
+                }
+                if err := hf.Sync(); err != nil {
+                    return errors.Wrapf(err, "finish old segment %s", hf.Name())
+                }
+                if err := hf.Close(); err != nil {
+                    return errors.Wrapf(err, "finish old segment %s", hf.Name())
+                }
+                return nil
+            }
         }()
     }
@@ -595,8 +611,8 @@ func (w *SegmentWAL) cut() error {
     }
 
     go func() {
-        if err = w.dirFile.Sync(); err != nil {
-            level.Error(w.logger).Log("msg", "sync WAL directory", "err", err)
+        w.actorc <- func() error {
+            return errors.Wrap(w.dirFile.Sync(), "sync WAL directory")
         }
     }()
 
@@ -675,9 +691,23 @@ func (w *SegmentWAL) run(interval time.Duration) {
     defer close(w.donec)
 
     for {
+        // Processing all enqueued operations has precedence over shutdown and
+        // background syncs.
+        select {
+        case f := <-w.actorc:
+            if err := f(); err != nil {
+                level.Error(w.logger).Log("msg", "operation failed", "err", err)
+            }
+            continue
+        default:
+        }
         select {
         case <-w.stopc:
             return
+        case f := <-w.actorc:
+            if err := f(); err != nil {
+                level.Error(w.logger).Log("msg", "operation failed", "err", err)
+            }
         case <-tick:
             if err := w.Sync(); err != nil {
                 level.Error(w.logger).Log("msg", "sync failed", "err", err)
@@ -702,7 +732,8 @@ func (w *SegmentWAL) Close() error {
     if hf := w.head(); hf != nil {
         return errors.Wrapf(hf.Close(), "closing WAL head %s", hf.Name())
     }
-    return nil
+
+    return w.dirFile.Close()
 }
 
 const (
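The remaining wal.go hunks replace fire-and-forget goroutines that only logged failures with closures queued on the new actorc channel; the run loop drains that channel so segment finalization and directory syncs execute one at a time and their errors are surfaced. A rough standalone sketch of that pattern under assumed names (worker, its fields, and the printed messages are illustrative, not the vendored API):

package main

import (
    "fmt"
    "time"
)

// worker loosely mirrors the SegmentWAL pattern: background operations are
// queued as closures on actorc and executed one at a time by run, so they
// never race with periodic work or shutdown.
type worker struct {
    actorc chan func() error
    stopc  chan struct{}
    donec  chan struct{}
}

func newWorker() *worker {
    w := &worker{
        actorc: make(chan func() error, 1),
        stopc:  make(chan struct{}),
        donec:  make(chan struct{}),
    }
    go w.run(100 * time.Millisecond)
    return w
}

func (w *worker) run(interval time.Duration) {
    tick := time.Tick(interval)
    defer close(w.donec)
    for {
        // Drain queued operations first so they take precedence over
        // shutdown and periodic syncs, as in SegmentWAL.run.
        select {
        case f := <-w.actorc:
            if err := f(); err != nil {
                fmt.Println("operation failed:", err)
            }
            continue
        default:
        }
        select {
        case <-w.stopc:
            return
        case f := <-w.actorc:
            if err := f(); err != nil {
                fmt.Println("operation failed:", err)
            }
        case <-tick:
            fmt.Println("periodic sync")
        }
    }
}

func main() {
    w := newWorker()
    w.actorc <- func() error {
        fmt.Println("finalizing old segment")
        return nil
    }
    time.Sleep(250 * time.Millisecond)
    close(w.stopc)
    <-w.donec
}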
vendor/vendor.json (vendored): 6 changed lines

@@ -846,10 +846,10 @@
             "revisionTime": "2016-04-11T19:08:41Z"
         },
         {
-            "checksumSHA1": "qbhdcw451oyIWXj+0zlkR+rDi9Y=",
+            "checksumSHA1": "MZoz9kpR5PSUM9mJLh3c7nSrk9c=",
             "path": "github.com/prometheus/tsdb",
-            "revision": "5d28c849c7ff3b43e2829a44a9aac16468e076ce",
-            "revisionTime": "2017-10-25T14:52:11Z"
+            "revision": "b1df85781931b0ff48d09a364174016d16a4dc3e",
+            "revisionTime": "2017-11-01T17:11:22Z"
         },
         {
             "checksumSHA1": "uy6ySJ6EZqof+yMD2wTkYob8BeU=",