Merge pull request #3202 from prometheus/beta5

*: release v2.0.0-beta.5

Commit: 64c7932e85
CHANGELOG.md (20 changed lines)
@@ -1,17 +1,17 @@
-## v2.0.0-beta.4 / 2017-09-14
+## v2.0.0-beta.5 / 2017-09-21
 
 This release includes numerous changes to the new storage layer. The main changes are:
 
-* [CHANGES] Single, compacted write ahead log
-* [CHANGES] Single in-memory block with garbage collection
-* [ENHANCEMENTS] Cache series dropped via `metric_relabel_configs`
-* [ENHANCEMENTS] Pool byte buffers for scraping
+* [BUGFIX] Remove deadlock on startup when restoring WAL
+* [BUGFIX] Fix semantical races resulting in invalid persisted files
+* [BUGFIX] Correctly read back WAL in certain edge cases
+* [BUGFIX] Prevent crashes caused by changing metric representations in target's /metrics
+* [ENHANCEMENT] Overall memory usage reduction
+* [ENHANCEMENT] Serve debugging endpoints while TSDB is loading
+* [ENHANCEMENT] Healthy endpoint correctly reflects liveness during startup
+* [ENHANCEMENT] Switch to consistent usage of go-kit/log
 
-Overall the changes achieve a baseline reduction in memory consumption and reduce
-peak memory usage by 30-40% compared to the 2.0.0-beta.2 release.
-
-This release requires a clean storage directory and is not compatible with files
-created by previous beta releases.
+This release may have issues with files written by previous beta releases.
 
 ## 1.7.1 / 2017-06-12
vendor/github.com/prometheus/tsdb/compact.go (generated, vendored, 2 changed lines)
@@ -457,7 +457,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta,
 
 		indexr := b.Index()
 
-		all, err := indexr.Postings("", "")
+		all, err := indexr.Postings(allPostingsKey.Name, allPostingsKey.Value)
 		if err != nil {
 			return err
 		}
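The compactor change above swaps the hard-coded empty-string lookup for the shared allPostingsKey sentinel introduced in postings.go. As a rough illustration of the sentinel-key idea (simplified stand-in types, not the tsdb implementation), an empty label can serve as the bucket under which every series ID is indexed, so fetching "all postings" becomes an ordinary lookup:

package sketch

// label is a simplified stand-in for labels.Label.
type label struct{ Name, Value string }

// An empty label never collides with a real label, so it can key the
// "all postings" list.
var allPostingsKey = label{}

type index map[label][]uint64

// add indexes id under each of its labels and under the sentinel key.
func (ix index) add(id uint64, lset []label) {
	for _, l := range lset {
		ix[l] = append(ix[l], id)
	}
	ix[allPostingsKey] = append(ix[allPostingsKey], id)
}

// postings returns the IDs indexed under a single name/value pair;
// postings(allPostingsKey.Name, allPostingsKey.Value) returns every ID.
func (ix index) postings(name, value string) []uint64 {
	return ix[label{Name: name, Value: value}]
}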
vendor/github.com/prometheus/tsdb/head.go (generated, vendored, 20 changed lines)
@@ -190,6 +190,10 @@ func (h *Head) ReadWAL() error {
 	r := h.wal.Reader()
 	mint := h.MinTime()
 
+	// Track number of samples that referenced a series we don't know about
+	// for error reporting.
+	var unknownRefs int
+
 	seriesFunc := func(series []RefSeries) error {
 		for _, s := range series {
 			h.getOrCreateWithID(s.Ref, s.Labels.Hash(), s.Labels)
@@ -207,7 +211,7 @@ func (h *Head) ReadWAL() error {
 			}
 			ms := h.series.getByID(s.Ref)
 			if ms == nil {
-				h.logger.Log("msg", "unknown series reference in WAL", "ref", s.Ref)
+				unknownRefs++
 				continue
 			}
 			_, chunkCreated := ms.append(s.T, s.V)
@@ -230,6 +234,10 @@ func (h *Head) ReadWAL() error {
 		return nil
 	}
 
+	if unknownRefs > 0 {
+		h.logger.Log("msg", "unknown series references in WAL samples", "count", unknownRefs)
+	}
+
 	if err := r.Read(seriesFunc, samplesFunc, deletesFunc); err != nil {
 		return errors.Wrap(err, "consume WAL")
 	}
@@ -267,12 +275,10 @@ func (h *Head) Truncate(mint int64) error {
 
 	start = time.Now()
 
-	p, err := h.indexRange(mint, math.MaxInt64).Postings("", "")
-	if err != nil {
-		return err
+	keep := func(id uint64) bool {
+		return h.series.getByID(id) != nil
 	}
-
-	if err := h.wal.Truncate(mint, p); err == nil {
+	if err := h.wal.Truncate(mint, keep); err == nil {
 		h.logger.Log("msg", "WAL truncation completed", "duration", time.Since(start))
 	} else {
 		h.logger.Log("msg", "WAL truncation failed", "err", err, "duration", time.Since(start))
@@ -1038,8 +1044,6 @@ func (s *stripeSeries) getOrSet(hash uint64, series *memSeries) (*memSeries, bool) {
 		return prev, false
 	}
 	s.hashes[i].set(hash, series)
-
-	s.hashes[i][hash] = append(s.hashes[i][hash], series)
 	s.locks[i].Unlock()
 
 	i = series.ref & stripeMask
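Two ideas from the head.go changes above are worth calling out: WAL replay now counts samples whose series reference is unknown and logs a single summary instead of one line per sample, and Truncate hands the WAL a keep closure built from the in-memory series set. A minimal sketch of the aggregated-logging pattern follows; refExists and logf are hypothetical stand-ins for h.series.getByID and the go-kit logger, not tsdb APIs:

package sketch

// replaySamples mirrors the unknownRefs pattern from Head.ReadWAL: unknown
// references are counted during replay and reported once at the end, since
// logging per sample could emit millions of lines on a large WAL.
func replaySamples(refs []uint64, refExists func(uint64) bool, logf func(string, ...interface{})) {
	var unknownRefs int
	for _, ref := range refs {
		if !refExists(ref) {
			unknownRefs++
			continue
		}
		// Append the sample to its in-memory series here.
	}
	if unknownRefs > 0 {
		logf("unknown series references in WAL samples: %d", unknownRefs)
	}
}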
vendor/github.com/prometheus/tsdb/postings.go (generated, vendored, 29 changed lines)
@@ -45,7 +45,7 @@ func (p *memPostings) get(name, value string) Postings {
 	return newListPostings(l)
 }
 
-var allLabel = labels.Label{}
+var allPostingsKey = labels.Label{}
 
 // add adds a document to the index. The caller has to ensure that no
 // term argument appears twice.
@@ -53,13 +53,36 @@ func (p *memPostings) add(id uint64, lset labels.Labels) {
 	p.mtx.Lock()
 
 	for _, l := range lset {
-		p.m[l] = append(p.m[l], id)
+		p.addFor(id, l)
 	}
-	p.m[allLabel] = append(p.m[allLabel], id)
+	p.addFor(id, allPostingsKey)
 
 	p.mtx.Unlock()
 }
 
+func (p *memPostings) addFor(id uint64, l labels.Label) {
+	list := append(p.m[l], id)
+	p.m[l] = list
+
+	// There is no guarantee that no higher ID was inserted before as they may
+	// be generated independently before adding them to postings.
+	// We repair order violations on insert. The invariant is that the first n-1
+	// items in the list are already sorted.
+	for i := len(list) - 1; i >= 1; i-- {
+		if list[i] >= list[i-1] {
+			break
+		}
+		list[i], list[i-1] = list[i-1], list[i]
+	}
+}
+
+func expandPostings(p Postings) (res []uint64, err error) {
+	for p.Next() {
+		res = append(res, p.At())
+	}
+	return res, p.Err()
+}
+
 // Postings provides iterative access over a postings list.
 type Postings interface {
 	// Next advances the iterator and returns true if another value was found.
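The new addFor helper keeps each postings list sorted even though series IDs can be assigned concurrently and arrive out of order: the freshly appended ID is bubbled backwards until the list is ordered again, relying on the invariant that the first n-1 elements were already sorted. A small self-contained sketch of that repair step (illustrative names, not the tsdb API):

package main

import "fmt"

// insertSorted appends id and restores order with a single backward pass,
// assuming everything before the new element is already sorted.
func insertSorted(list []uint64, id uint64) []uint64 {
	list = append(list, id)
	for i := len(list) - 1; i >= 1; i-- {
		if list[i] >= list[i-1] {
			break // in order; the prefix is sorted by the invariant
		}
		list[i], list[i-1] = list[i-1], list[i]
	}
	return list
}

func main() {
	postings := []uint64{2, 5, 9}
	postings = insertSorted(postings, 7) // out-of-order ID gets repaired into place
	fmt.Println(postings)                // [2 5 7 9]
}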
vendor/github.com/prometheus/tsdb/wal.go (generated, vendored, 15 changed lines)
@@ -71,7 +71,7 @@ type WAL interface {
 	LogSeries([]RefSeries) error
 	LogSamples([]RefSample) error
 	LogDeletes([]Stone) error
-	Truncate(int64, Postings) error
+	Truncate(mint int64, keep func(uint64) bool) error
 	Close() error
 }
 
@@ -87,7 +87,7 @@ func (w nopWAL) Reader() WALReader { return w }
 func (nopWAL) LogSeries([]RefSeries) error { return nil }
 func (nopWAL) LogSamples([]RefSample) error { return nil }
 func (nopWAL) LogDeletes([]Stone) error { return nil }
-func (nopWAL) Truncate(int64, Postings) error { return nil }
+func (nopWAL) Truncate(int64, func(uint64) bool) error { return nil }
 func (nopWAL) Close() error { return nil }
 
 // WALReader reads entries from a WAL.
@@ -272,8 +272,9 @@ func (w *SegmentWAL) putBuffer(b *encbuf) {
 	w.buffers.Put(b)
 }
 
-// Truncate deletes the values prior to mint and the series entries not in p.
-func (w *SegmentWAL) Truncate(mint int64, p Postings) error {
+// Truncate deletes the values prior to mint and the series which the keep function
+// does not indiciate to preserve.
+func (w *SegmentWAL) Truncate(mint int64, keep func(uint64) bool) error {
 	// The last segment is always active.
 	if len(w.files) < 2 {
 		return nil
@@ -314,7 +315,6 @@ func (w *SegmentWAL) Truncate(mint int64, p Postings) error {
 		activeSeries = []RefSeries{}
 	)
 
-Loop:
 	for r.next() {
 		rt, flag, byt := r.at()
 
@@ -328,10 +328,7 @@ Loop:
 			activeSeries = activeSeries[:0]
 
 			for _, s := range series {
-				if !p.Seek(s.Ref) {
-					break Loop
-				}
-				if p.At() == s.Ref {
+				if keep(s.Ref) {
 					activeSeries = append(activeSeries, s)
 				}
 			}
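With this change the WAL interface no longer depends on a Postings iterator for truncation; callers pass an arbitrary keep predicate and the WAL retains exactly the series entries the predicate accepts. A toy sketch of that contract, using a hypothetical in-memory WAL rather than SegmentWAL:

package sketch

// memWAL is a hypothetical stand-in used only to illustrate the
// predicate-based Truncate contract; it is not part of the tsdb package.
type memWAL struct {
	series map[uint64][]byte // series ref -> encoded series entry
}

// Truncate drops every series entry whose reference the keep function rejects.
// The mint argument is ignored here because this toy WAL stores no samples.
func (w *memWAL) Truncate(mint int64, keep func(uint64) bool) error {
	for ref := range w.series {
		if !keep(ref) {
			delete(w.series, ref)
		}
	}
	return nil
}

A caller would typically build keep as a closure over its in-memory index, for example func(id uint64) bool { return h.series.getByID(id) != nil }, which is exactly what Head.Truncate does in the head.go diff above.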
vendor/vendor.json (vendored, 14 changed lines)
@@ -871,22 +871,22 @@
 			"revisionTime": "2016-04-11T19:08:41Z"
 		},
 		{
-			"checksumSHA1": "B5ndMoK8lqgFJ8xUZ/0V4zCpUw0=",
+			"checksumSHA1": "evkeOdR0mTFS7yyREas6oa1QvHY=",
 			"path": "github.com/prometheus/tsdb",
-			"revision": "162a48e4f2c6e486a0ebf61cf9cea73a8023ef0a",
-			"revisionTime": "2017-09-19T08:20:19Z"
+			"revision": "69f105f4f9478e929ef2a7d7553a7558b1de5c84",
+			"revisionTime": "2017-09-21T12:57:51Z"
 		},
 		{
 			"checksumSHA1": "Gua979gmISm4cJP/fR2hL8m5To8=",
 			"path": "github.com/prometheus/tsdb/chunks",
-			"revision": "162a48e4f2c6e486a0ebf61cf9cea73a8023ef0a",
-			"revisionTime": "2017-09-19T08:20:19Z"
+			"revision": "69f105f4f9478e929ef2a7d7553a7558b1de5c84",
+			"revisionTime": "2017-09-21T12:57:51Z"
 		},
 		{
 			"checksumSHA1": "zhmlvc322RH1L3l9DaA9d/HVVWs=",
 			"path": "github.com/prometheus/tsdb/labels",
-			"revision": "162a48e4f2c6e486a0ebf61cf9cea73a8023ef0a",
-			"revisionTime": "2017-09-19T08:20:19Z"
+			"revision": "69f105f4f9478e929ef2a7d7553a7558b1de5c84",
+			"revisionTime": "2017-09-21T12:57:51Z"
 		},
 		{
 			"checksumSHA1": "5SYLEhADhdBVZAGPVHWggQl7H8k=",