// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package local

import (
	"fmt"
	"sort"
	"sync"
	"time"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/storage/local/chunk"
	"github.com/prometheus/prometheus/storage/metric"
)

// fingerprintSeriesPair pairs a fingerprint with a memorySeries pointer.
type fingerprintSeriesPair struct {
	fp     model.Fingerprint
	series *memorySeries
}

// seriesMap maps fingerprints to memory series. All its methods are
// goroutine-safe. A seriesMap is effectively a goroutine-safe version of
// map[model.Fingerprint]*memorySeries.
type seriesMap struct {
	mtx sync.RWMutex
	m   map[model.Fingerprint]*memorySeries
}

// newSeriesMap returns a newly allocated empty seriesMap. To create a seriesMap
// based on a prefilled map, use an explicit initializer.
func newSeriesMap() *seriesMap {
	return &seriesMap{m: make(map[model.Fingerprint]*memorySeries)}
}

// length returns the number of mappings in the seriesMap.
func (sm *seriesMap) length() int {
	sm.mtx.RLock()
	defer sm.mtx.RUnlock()

	return len(sm.m)
}

// get returns a memorySeries for a fingerprint. Return values have the same
// semantics as the native Go map.
func (sm *seriesMap) get(fp model.Fingerprint) (s *memorySeries, ok bool) {
	sm.mtx.RLock()
	s, ok = sm.m[fp]
	// Note that the RUnlock is not done via defer for performance reasons.
	// TODO(beorn7): Once https://github.com/golang/go/issues/14939 is
	// fixed, revert to the usual defer idiom.
	sm.mtx.RUnlock()
	return
}

// put adds a mapping to the seriesMap. It panics if s == nil.
func (sm *seriesMap) put(fp model.Fingerprint, s *memorySeries) {
	sm.mtx.Lock()
	defer sm.mtx.Unlock()

	if s == nil {
		panic("tried to add nil pointer to seriesMap")
	}
	sm.m[fp] = s
}

// del removes a mapping from the seriesMap.
func (sm *seriesMap) del(fp model.Fingerprint) {
	sm.mtx.Lock()
	defer sm.mtx.Unlock()

	delete(sm.m, fp)
}

// iter returns a channel that produces all mappings in the seriesMap. The
// channel will be closed once all fingerprints have been received. Not
// consuming all fingerprints from the channel will leak a goroutine. The
// semantics of concurrent modification of the seriesMap are similar to those
// for iterating over a map with a 'range' clause. However, if the next element
// in iteration order is removed after the current element has been received
// from the channel, it will still be produced by the channel.
func (sm *seriesMap) iter() <-chan fingerprintSeriesPair {
	ch := make(chan fingerprintSeriesPair)
	go func() {
		sm.mtx.RLock()
		for fp, s := range sm.m {
			sm.mtx.RUnlock()
			ch <- fingerprintSeriesPair{fp, s}
			sm.mtx.RLock()
		}
		sm.mtx.RUnlock()
		close(ch)
	}()
	return ch
}
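
// Illustrative sketch (not from the original file): how a seriesMap is
// typically used, assuming the types defined above. The function name and the
// fingerprint/metric values are hypothetical. Note that iter() must be drained
// completely, otherwise the producing goroutine leaks.
func exampleSeriesMapUsage() {
	sm := newSeriesMap()
	fp := model.Fingerprint(0x1234) // hypothetical fingerprint

	// put panics on nil, so always hand it a real series.
	sm.put(fp, &memorySeries{metric: model.Metric{"__name__": "up"}})

	if s, ok := sm.get(fp); ok {
		_ = s.metric // same semantics as a native map lookup
	}

	// Drain the channel completely to avoid leaking the iterator goroutine.
	for pair := range sm.iter() {
		_ = pair.series
	}

	sm.del(fp)
}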

// sortedFPs returns a sorted slice of all the fingerprints in the seriesMap.
func (sm *seriesMap) sortedFPs() model.Fingerprints {
	sm.mtx.RLock()
	fps := make(model.Fingerprints, 0, len(sm.m))
	for fp := range sm.m {
		fps = append(fps, fp)
	}
	sm.mtx.RUnlock()

	// Sorting could take some time, so do it outside of the lock.
	sort.Sort(fps)
	return fps
}

type memorySeries struct {
	metric model.Metric
	// Sorted by start time, overlapping chunk ranges are forbidden.
	chunkDescs []*chunk.Desc
	// The index (within chunkDescs above) of the first chunk.Desc that
	// points to a non-persisted chunk. If all chunks are persisted, then
	// persistWatermark == len(chunkDescs).
	persistWatermark int
	// The modification time of the series file. The zero value of time.Time
	// is used to mark an unknown modification time.
	modTime time.Time
	// The chunkDescs in memory might not have all the chunkDescs for the
	// chunks that are persisted to disk. The missing chunkDescs are all
	// contiguous and at the tail end. chunkDescsOffset is the index of the
	// chunk on disk that corresponds to the first chunk.Desc in memory. If
	// it is 0, the chunkDescs are all loaded. A value of -1 denotes a
	// special case: There are chunks on disk, but the offset to the
	// chunkDescs in memory is unknown. Also, in this special case, there is
	// no overlap between chunks on disk and chunks in memory (implying that
	// upon first persisting of a chunk in memory, the offset has to be
	// set).
	chunkDescsOffset int
	// The savedFirstTime field is used as a fallback when the
	// chunkDescsOffset is not 0. It can be used to save the FirstTime of the
	// first chunk before its chunk desc is evicted. In doubt, this field is
	// just set to the oldest possible timestamp.
	savedFirstTime model.Time
	// The timestamp of the last sample in this series. Needed for fast
	// access for federation and to ensure timestamp monotonicity during
	// ingestion.
	lastTime model.Time
	// The last ingested sample value. Needed for fast access for
	// federation.
	lastSampleValue model.SampleValue
	// Whether lastSampleValue has been set already.
	lastSampleValueSet bool
	// Whether the current head chunk has already been finished. If true,
	// the current head chunk must not be modified anymore.
	headChunkClosed bool
	// Whether the current head chunk is used by an iterator. In that case,
	// a non-closed head chunk has to be cloned before more samples are
	// appended.
	headChunkUsedByIterator bool
	// Whether the series is inconsistent with the last checkpoint in a way
	// that would require a disk seek during crash recovery.
	dirty bool
}
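
// Illustrative sketch (not from the original file): a hypothetical helper that
// spells out the relationship encoded by chunkDescsOffset, as described on the
// struct fields above. It assumes the offset is known, i.e. not -1.
func exampleDiskIndexOfMemChunk(s *memorySeries, memIdx int) int {
	// chunkDescsOffset is the on-disk index of the first chunk.Desc kept in
	// memory, so the in-memory index memIdx maps to this on-disk index.
	return s.chunkDescsOffset + memIdx
}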

// newMemorySeries returns a pointer to a newly allocated memorySeries for the
// given metric. chunkDescs and modTime in the new series are set according to
// the provided parameters. chunkDescs can be nil or empty if this is a
// genuinely new time series (i.e. not one that is being unarchived). In that
// case, headChunkClosed is set to false, and firstTime and lastTime are both
// set to model.Earliest. The zero value for modTime can be used if the
// modification time of the series file is unknown (e.g. if this is a genuinely
// new series).
func newMemorySeries(m model.Metric, chunkDescs []*chunk.Desc, modTime time.Time) (*memorySeries, error) {
	var err error
	firstTime := model.Earliest
	lastTime := model.Earliest
	if len(chunkDescs) > 0 {
		firstTime = chunkDescs[0].FirstTime()
		if lastTime, err = chunkDescs[len(chunkDescs)-1].LastTime(); err != nil {
			return nil, err
		}
	}
	return &memorySeries{
		metric:           m,
		chunkDescs:       chunkDescs,
		headChunkClosed:  len(chunkDescs) > 0,
		savedFirstTime:   firstTime,
		lastTime:         lastTime,
		persistWatermark: len(chunkDescs),
		modTime:          modTime,
	}, nil
}
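
// Illustrative sketch (not from the original file): the two ways
// newMemorySeries is typically called, assuming the package types above. The
// function name and the metric labels are hypothetical. A genuinely new series
// gets nil chunkDescs and the zero modTime; an unarchived series passes its
// loaded chunk descriptors and the series file's modification time.
func exampleNewMemorySeries(loaded []*chunk.Desc, modTime time.Time) error {
	m := model.Metric{"__name__": "up", "job": "example"} // hypothetical metric

	// Genuinely new series: open head chunk, times set to model.Earliest.
	if _, err := newMemorySeries(m, nil, time.Time{}); err != nil {
		return err
	}

	// Unarchived series: head chunk closed, first/last time taken from the
	// loaded chunk descriptors.
	_, err := newMemorySeries(m, loaded, modTime)
	return err
}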

// add adds a sample pair to the series. It returns the number of newly
// completed chunks (which are now eligible for persistence).
//
// The caller must have locked the fingerprint of the series.
func (s *memorySeries) add(v model.SamplePair) (int, error) {
	if len(s.chunkDescs) == 0 || s.headChunkClosed {
		newHead := chunk.NewDesc(chunk.New(), v.Timestamp)
		s.chunkDescs = append(s.chunkDescs, newHead)
		s.headChunkClosed = false
	} else if s.headChunkUsedByIterator && s.head().RefCount() > 1 {
		// We only need to clone the head chunk if the current head
		// chunk was used in an iterator at all and if the refCount is
		// still greater than the 1 we always have because the head
		// chunk is not yet persisted. The latter is just an
		// approximation. We will still clone unnecessarily if an older
		// iterator using a previous version of the head chunk is still
		// around and keeps the head chunk pinned. We would need to
		// track pins by version of the head chunk, which is probably
		// not worth the effort.
		chunk.Ops.WithLabelValues(chunk.Clone).Inc()
		// No locking needed here because a non-persisted head chunk
		// cannot get evicted concurrently.
		s.head().C = s.head().C.Clone()
		s.headChunkUsedByIterator = false
	}

	chunks, err := s.head().Add(v)
	if err != nil {
		return 0, err
	}
	s.head().C = chunks[0]

	for _, c := range chunks[1:] {
		s.chunkDescs = append(s.chunkDescs, chunk.NewDesc(c, c.FirstTime()))
	}

	// Populate lastTime of now-closed chunks.
	for _, cd := range s.chunkDescs[len(s.chunkDescs)-len(chunks) : len(s.chunkDescs)-1] {
		if err := cd.MaybePopulateLastTime(); err != nil {
			return 0, err
		}
	}

	s.lastTime = v.Timestamp
	s.lastSampleValue = v.Value
	s.lastSampleValueSet = true
	return len(chunks) - 1, nil
}
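
// Illustrative sketch (not from the original file): how add is driven by the
// ingestion path, assuming the caller already holds the lock for the series
// fingerprint. The function name and the sample value are hypothetical.
func exampleAddSample(s *memorySeries) (int, error) {
	// The returned count is the number of chunks completed by this sample
	// and therefore now eligible for persistence.
	return s.add(model.SamplePair{
		Timestamp: model.Now(),
		Value:     42,
	})
}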

// maybeCloseHeadChunk closes the head chunk if it has not been touched for the
// provided duration. It returns whether the head chunk was closed. If the head
// chunk is already closed, the method is a no-op and returns false.
//
// The caller must have locked the fingerprint of the series.
func (s *memorySeries) maybeCloseHeadChunk(timeout time.Duration) (bool, error) {
	if s.headChunkClosed {
		return false, nil
	}
	if time.Since(s.lastTime.Time()) > timeout {
		s.headChunkClosed = true
		// Since we cannot modify the head chunk from now on, we
		// don't need to bother with cloning anymore.
		s.headChunkUsedByIterator = false
		return true, s.head().MaybePopulateLastTime()
	}
	return false, nil
}
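
// Illustrative sketch (not from the original file): how the head chunk timeout
// is applied during series maintenance, assuming the caller holds the lock for
// the series fingerprint. The function name and the timeout value are
// hypothetical.
func exampleMaybeCloseHeadChunk(s *memorySeries) (bool, error) {
	// A closed head chunk becomes immutable and is thus eligible for
	// persistence and, eventually, eviction.
	return s.maybeCloseHeadChunk(5 * time.Minute)
}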

// evictChunkDescs evicts the oldest lenToEvict chunkDescs, i.e. lenToEvict is
// the index within the current chunkDescs of the oldest chunk desc that is not
// evicted.
func (s *memorySeries) evictChunkDescs(lenToEvict int) {
	if lenToEvict < 1 {
		return
	}
	if s.chunkDescsOffset < 0 {
		panic("chunk desc eviction requested with unknown chunk desc offset")
	}
	lenToKeep := len(s.chunkDescs) - lenToEvict
	s.savedFirstTime = s.firstTime()
	s.chunkDescsOffset += lenToEvict
	s.persistWatermark -= lenToEvict
	chunk.DescOps.WithLabelValues(chunk.Evict).Add(float64(lenToEvict))
	chunk.NumMemDescs.Sub(float64(lenToEvict))
	s.chunkDescs = append(
		make([]*chunk.Desc, 0, lenToKeep),
		s.chunkDescs[lenToEvict:]...,
	)
	s.dirty = true
}

// dropChunks removes chunkDescs older than t. The caller must have locked the
// fingerprint of the series.
func (s *memorySeries) dropChunks(t model.Time) error {
	keepIdx := len(s.chunkDescs)
	for i, cd := range s.chunkDescs {
		lt, err := cd.LastTime()
		if err != nil {
			return err
		}
		if !lt.Before(t) {
			keepIdx = i
			break
		}
	}
	if keepIdx == len(s.chunkDescs) && !s.headChunkClosed {
		// Never drop an open head chunk.
		keepIdx--
	}
	if keepIdx <= 0 {
		// Nothing to drop.
		return nil
	}
	s.chunkDescs = append(
		make([]*chunk.Desc, 0, len(s.chunkDescs)-keepIdx),
		s.chunkDescs[keepIdx:]...,
	)
	s.persistWatermark -= keepIdx
	if s.persistWatermark < 0 {
		panic("dropped unpersisted chunks from memory")
	}
	if s.chunkDescsOffset != -1 {
		s.chunkDescsOffset += keepIdx
	}
	chunk.NumMemDescs.Sub(float64(keepIdx))
	s.dirty = true
	return nil
}
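
// Illustrative sketch (not from the original file): applying a retention
// cutoff to a series, assuming the caller holds the fingerprint lock. The
// function name and the retention parameter are hypothetical.
func exampleDropChunks(s *memorySeries, retention time.Duration) error {
	cutoff := model.Now().Add(-retention)
	// Chunks whose last sample is older than cutoff are dropped; an open
	// head chunk is always kept.
	return s.dropChunks(cutoff)
}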

// preloadChunks is an internal helper method. It pins the chunks at the
// provided indexes, loads those among them that have been evicted from memory
// back from disk, and returns an iterator over exactly those chunks.
func (s *memorySeries) preloadChunks(
	indexes []int, fp model.Fingerprint, mss *MemorySeriesStorage,
) (SeriesIterator, error) {
	loadIndexes := []int{}
	pinnedChunkDescs := make([]*chunk.Desc, 0, len(indexes))
	for _, idx := range indexes {
		cd := s.chunkDescs[idx]
		pinnedChunkDescs = append(pinnedChunkDescs, cd)
		cd.Pin(mss.evictRequests) // Have to pin everything first to prevent immediate eviction on chunk loading.
		if cd.IsEvicted() {
			loadIndexes = append(loadIndexes, idx)
		}
	}
	chunk.Ops.WithLabelValues(chunk.Pin).Add(float64(len(pinnedChunkDescs)))

	if len(loadIndexes) > 0 {
		if s.chunkDescsOffset == -1 {
			panic("requested loading chunks from persistence in a situation where we must not have persisted data for chunk descriptors in memory")
		}
		chunks, err := mss.loadChunks(fp, loadIndexes, s.chunkDescsOffset)
		if err != nil {
			// Unpin the chunks since we won't return them as pinned chunks now.
			for _, cd := range pinnedChunkDescs {
				cd.Unpin(mss.evictRequests)
			}
			chunk.Ops.WithLabelValues(chunk.Unpin).Add(float64(len(pinnedChunkDescs)))
			return nopIter, err
		}
		for i, c := range chunks {
			s.chunkDescs[loadIndexes[i]].SetChunk(c)
		}
	}

	if !s.headChunkClosed && indexes[len(indexes)-1] == len(s.chunkDescs)-1 {
		s.headChunkUsedByIterator = true
	}

	curriedQuarantineSeries := func(err error) {
		mss.quarantineSeries(fp, s.metric, err)
	}

	iter := &boundedIterator{
		it:    s.newIterator(pinnedChunkDescs, curriedQuarantineSeries, mss.evictRequests),
		start: model.Now().Add(-mss.dropAfter),
	}

	return iter, nil
}
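
// Illustrative sketch (not from the original file): how preloadChunks is
// driven by the storage layer, assuming the caller holds the fingerprint lock
// and has already determined which in-memory chunk indexes are needed for the
// query range. The function name is hypothetical.
func examplePreloadChunks(
	s *memorySeries, fp model.Fingerprint, mss *MemorySeriesStorage, indexes []int,
) (SeriesIterator, error) {
	// The returned iterator covers only the pinned (preloaded) chunks and is
	// bounded by the storage retention window.
	return s.preloadChunks(indexes, fp, mss)
}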

// newIterator returns a new SeriesIterator for the provided chunkDescs (which
// must be pinned).
//
// The caller must have locked the fingerprint of the memorySeries.
func (s *memorySeries) newIterator(
	pinnedChunkDescs []*chunk.Desc,
	quarantine func(error),
	evictRequests chan<- chunk.EvictRequest,
) SeriesIterator {
	chunks := make([]chunk.Chunk, 0, len(pinnedChunkDescs))
	for _, cd := range pinnedChunkDescs {
		// It's OK to directly access cd.C here (without locking) as the
		// series FP is locked and the chunk is pinned.
		chunks = append(chunks, cd.C)
	}
	return &memorySeriesIterator{
		chunks:           chunks,
		chunkIts:         make([]chunk.Iterator, len(chunks)),
		quarantine:       quarantine,
		metric:           s.metric,
		pinnedChunkDescs: pinnedChunkDescs,
		evictRequests:    evictRequests,
|
2016-02-16 09:47:50 -08:00
|
|
|
}
|
2014-06-06 02:55:53 -07:00
|
|
|
}
|
|
|
|
|
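The loop above copies each chunk out of its descriptor without taking the descriptor's mutex, which is safe only because the series fingerprint is locked and every chunk has already been pinned. Below is a minimal, self-contained sketch of that pin-then-copy pattern; simpleDesc and its fields are simplified stand-ins, not the real chunk.Desc API.

package main

import "fmt"

// simpleDesc is a stripped-down stand-in for a chunk descriptor: it owns a
// payload and a pin count. Real descriptors also guard eviction with a mutex.
type simpleDesc struct {
	pinned  int
	payload []int
}

// pin marks the descriptor as in use; while pinned it must not be evicted.
func (d *simpleDesc) pin() { d.pinned++ }

// preloadAndCollect pins every descriptor first and only then copies the
// payloads out. Because nothing can evict a pinned chunk, the copy needs no
// further locking.
func preloadAndCollect(descs []*simpleDesc) [][]int {
	for _, d := range descs {
		d.pin()
	}
	chunks := make([][]int, 0, len(descs))
	for _, d := range descs {
		chunks = append(chunks, d.payload) // safe: d is pinned
	}
	return chunks
}

func main() {
	descs := []*simpleDesc{{payload: []int{1, 2}}, {payload: []int{3}}}
	fmt.Println(preloadAndCollect(descs)) // [[1 2] [3]]
}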
2016-03-09 05:50:09 -08:00
|
|
|
// preloadChunksForInstant preloads chunks for the latest value in the given
|
|
|
|
// range. If the last sample saved in the memorySeries itself is the latest
|
|
|
|
// value in the given range, it will in fact preload zero chunks and just take
|
|
|
|
// that value.
|
|
|
|
func (s *memorySeries) preloadChunksForInstant(
|
|
|
|
fp model.Fingerprint,
|
|
|
|
from model.Time, through model.Time,
|
2016-06-28 23:14:23 -07:00
|
|
|
mss *MemorySeriesStorage,
|
2016-07-11 11:27:25 -07:00
|
|
|
) (SeriesIterator, error) {
|
2017-03-16 03:49:41 -07:00
|
|
|
// If we have a lastSamplePair in the series, and this last samplePair
|
2016-03-09 05:50:09 -08:00
|
|
|
// is in the interval, just take it in a singleSampleSeriesIterator. No
|
|
|
|
// need to pin or load anything.
|
|
|
|
lastSample := s.lastSamplePair()
|
|
|
|
if !through.Before(lastSample.Timestamp) &&
|
|
|
|
!from.After(lastSample.Timestamp) &&
|
2016-09-28 14:40:26 -07:00
|
|
|
lastSample != model.ZeroSamplePair {
|
2016-03-09 05:50:09 -08:00
|
|
|
iter := &boundedIterator{
|
2016-07-11 11:27:25 -07:00
|
|
|
it: &singleSampleSeriesIterator{
|
|
|
|
samplePair: lastSample,
|
|
|
|
metric: s.metric,
|
|
|
|
},
|
2016-03-09 05:50:09 -08:00
|
|
|
start: model.Now().Add(-mss.dropAfter),
|
|
|
|
}
|
2016-07-11 11:27:25 -07:00
|
|
|
return iter, nil
|
2016-03-09 05:50:09 -08:00
|
|
|
}
|
|
|
|
// If we are here, we are out of luck and have to delegate to the more
|
|
|
|
// expensive method.
|
|
|
|
return s.preloadChunksForRange(fp, from, through, mss)
|
|
|
|
}
|
|
|
|
|
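preloadChunksForInstant above takes a shortcut: when the series' in-memory last sample already lies inside [from, through], it answers the instant query from that sample alone and pins no chunks. A small sketch of that decision follows; samplePair and instantValue are hypothetical stand-ins, not the real types.

package main

import (
	"fmt"
	"time"
)

// samplePair is a stand-in for a timestamp/value pair.
type samplePair struct {
	ts  time.Time
	val float64
}

// instantValue mirrors the shortcut above: if the freshest sample of a series
// falls inside [from, through], return it directly; otherwise signal that the
// (more expensive) range preload is needed.
func instantValue(last samplePair, from, through time.Time) (samplePair, bool) {
	if !through.Before(last.ts) && !from.After(last.ts) {
		return last, true // cheap path: no chunks pinned or loaded
	}
	return samplePair{}, false // fall back to preloading chunks for the range
}

func main() {
	now := time.Now()
	last := samplePair{ts: now, val: 42}
	if v, ok := instantValue(last, now.Add(-5*time.Minute), now); ok {
		fmt.Println("instant value:", v.val)
	}
}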
2014-10-07 10:11:24 -07:00
|
|
|
// preloadChunksForRange loads chunks for the given range from the persistence.
|
|
|
|
// The caller must have locked the fingerprint of the series.
|
2014-10-14 09:23:32 -07:00
|
|
|
func (s *memorySeries) preloadChunksForRange(
|
2016-02-19 09:35:30 -08:00
|
|
|
fp model.Fingerprint,
|
2015-08-20 08:18:46 -07:00
|
|
|
from model.Time, through model.Time,
|
2016-06-28 23:14:23 -07:00
|
|
|
mss *MemorySeriesStorage,
|
2016-07-11 11:27:25 -07:00
|
|
|
) (SeriesIterator, error) {
|
2015-08-20 08:18:46 -07:00
|
|
|
firstChunkDescTime := model.Latest
|
2014-10-14 09:23:32 -07:00
|
|
|
if len(s.chunkDescs) > 0 {
|
2016-09-21 14:44:27 -07:00
|
|
|
firstChunkDescTime = s.chunkDescs[0].FirstTime()
|
2014-10-14 09:23:32 -07:00
|
|
|
}
|
2014-10-27 12:40:48 -07:00
|
|
|
if s.chunkDescsOffset != 0 && from.Before(firstChunkDescTime) {
|
2015-07-06 16:10:14 -07:00
|
|
|
cds, err := mss.loadChunkDescs(fp, s.persistWatermark)
|
2014-10-14 09:23:32 -07:00
|
|
|
if err != nil {
|
2016-07-11 11:27:25 -07:00
|
|
|
return nopIter, err
|
2014-06-06 02:55:53 -07:00
|
|
|
}
|
2016-12-13 09:41:02 -08:00
|
|
|
if s.chunkDescsOffset != -1 && len(cds) != s.chunkDescsOffset {
|
|
|
|
return nopIter, fmt.Errorf(
|
|
|
|
"unexpected number of chunk descs loaded for fingerprint %v: expected %d, got %d",
|
|
|
|
fp, s.chunkDescsOffset, len(cds),
|
|
|
|
)
|
|
|
|
}
|
2017-02-04 17:25:09 -08:00
|
|
|
s.persistWatermark += len(cds)
|
2014-10-14 09:23:32 -07:00
|
|
|
s.chunkDescs = append(cds, s.chunkDescs...)
|
2014-10-27 12:40:48 -07:00
|
|
|
s.chunkDescsOffset = 0
|
2016-12-13 09:41:02 -08:00
|
|
|
if len(s.chunkDescs) > 0 {
|
|
|
|
firstChunkDescTime = s.chunkDescs[0].FirstTime()
|
|
|
|
}
|
2014-06-06 02:55:53 -07:00
|
|
|
}
|
|
|
|
|
2016-02-16 09:47:50 -08:00
|
|
|
if len(s.chunkDescs) == 0 || through.Before(firstChunkDescTime) {
|
2016-07-11 11:27:25 -07:00
|
|
|
return nopIter, nil
|
2014-06-06 02:55:53 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// Find first chunk with start time after "from".
|
|
|
|
fromIdx := sort.Search(len(s.chunkDescs), func(i int) bool {
|
2016-09-21 14:44:27 -07:00
|
|
|
return s.chunkDescs[i].FirstTime().After(from)
|
2014-06-06 02:55:53 -07:00
|
|
|
})
|
|
|
|
// Find first chunk with start time after "through".
|
|
|
|
throughIdx := sort.Search(len(s.chunkDescs), func(i int) bool {
|
2016-09-21 14:44:27 -07:00
|
|
|
return s.chunkDescs[i].FirstTime().After(through)
|
2014-06-06 02:55:53 -07:00
|
|
|
})
|
2016-02-16 09:47:50 -08:00
|
|
|
if fromIdx == len(s.chunkDescs) {
|
|
|
|
// Even the last chunk starts before "from". Find out if the
|
|
|
|
// series ends before "from" and we don't need to do anything.
|
2016-09-21 14:44:27 -07:00
|
|
|
lt, err := s.chunkDescs[len(s.chunkDescs)-1].LastTime()
|
Handle errors caused by data corruption more gracefully
This requires all the panic calls upon unexpected data to be converted
into errors returned. This pollutes the function signatures quite a
lot. Well, this is Go...
The ideas behind this are the following:
- panic only if it's a programming error. Data corruptions happen, and
they are not programming errors.
- If we detect a data corruption, we "quarantine" the series,
essentially removing it from the database and putting its data into
a separate directory for forensics.
- Failure during writing to a series file is not considered corruption
automatically. It will call setDirty, though, so that a
crash recovery upon the next restart will commence and check for
that.
- Series quarantining and setDirty calls are logged and counted in
metrics, but are hidden from the user of the interfaces in
interface.go, with the notable exception of Append(). The reasoning
is that we treat corruption by removing the corrupted series, i.e. a
query for it will return no results on its next call anyway, so
return no results right now. In the case of Append(), we want to
tell the user that no data has been appended, though.
Minor side effects:
- Now consistently using filepath.* instead of path.*.
- Introduced structured logging where I touched it. This makes things
less consistent, but a complete change to structured logging would
be out of scope for this PR.
2016-02-25 03:23:42 -08:00
|
|
|
if err != nil {
|
2016-07-11 11:27:25 -07:00
|
|
|
return nopIter, err
|
2016-02-25 03:23:42 -08:00
|
|
|
}
|
|
|
|
if lt.Before(from) {
|
2016-07-11 11:27:25 -07:00
|
|
|
return nopIter, nil
|
2016-02-16 09:47:50 -08:00
|
|
|
}
|
|
|
|
}
|
2014-06-06 02:55:53 -07:00
|
|
|
if fromIdx > 0 {
|
|
|
|
fromIdx--
|
|
|
|
}
|
|
|
|
if throughIdx == len(s.chunkDescs) {
|
|
|
|
throughIdx--
|
|
|
|
}
|
2016-10-25 05:59:33 -07:00
|
|
|
if fromIdx > throughIdx {
|
|
|
|
// Guard against nonsensical result. The caller will quarantine the series with a meaningful log entry.
|
|
|
|
return nopIter, fmt.Errorf("fromIdx=%d is greater than throughIdx=%d, likely caused by data corruption", fromIdx, throughIdx)
|
|
|
|
}
|
2014-06-06 02:55:53 -07:00
|
|
|
|
|
|
|
pinIndexes := make([]int, 0, throughIdx-fromIdx+1)
|
|
|
|
for i := fromIdx; i <= throughIdx; i++ {
|
|
|
|
pinIndexes = append(pinIndexes, i)
|
|
|
|
}
|
2015-05-06 07:53:12 -07:00
|
|
|
return s.preloadChunks(pinIndexes, fp, mss)
|
2014-06-06 02:55:53 -07:00
|
|
|
}
|
|
|
|
|
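preloadChunksForRange brackets the relevant chunks with two sort.Search calls over the chunk start times and then widens the window: fromIdx is moved back by one because the chunk preceding the first match may still contain samples at or after "from", and throughIdx is clamped to the last chunk when every chunk starts before "through". A sketch of that index arithmetic over plain int64 timestamps; chunkWindow and firstTimes are illustrative names, not from the code.

package main

import (
	"fmt"
	"sort"
)

// chunkWindow brackets the chunks whose samples may overlap [from, through],
// given the (sorted) start time of each chunk. It mirrors the fromIdx /
// throughIdx computation above, including the widening and clamping steps.
func chunkWindow(firstTimes []int64, from, through int64) (int, int) {
	fromIdx := sort.Search(len(firstTimes), func(i int) bool {
		return firstTimes[i] > from // first chunk starting after "from"
	})
	throughIdx := sort.Search(len(firstTimes), func(i int) bool {
		return firstTimes[i] > through // first chunk starting after "through"
	})
	if fromIdx > 0 {
		fromIdx-- // the previous chunk may still reach into the range
	}
	if throughIdx == len(firstTimes) {
		throughIdx-- // clamp to the last existing chunk
	}
	return fromIdx, throughIdx
}

func main() {
	starts := []int64{0, 100, 200, 300}
	fmt.Println(chunkWindow(starts, 150, 350)) // 1 3
}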
2014-10-28 11:01:41 -07:00
|
|
|
// head returns a pointer to the head chunk descriptor. The caller must have
|
Fix a bug handling freshly unarchived series.
Usually, if you unarchive a series, it is to add something to it,
which will create a new head chunk. However, if a series is
unarchived and, before anything is added to it, it is handled by the
maintenance loop, it will be archived again. In that case, we have to
load the chunkDescs to know the lastTime of the series to be
archived. Usually, this case will happen only rarely (as a race, it has
never happened so far, possibly because the locking around unarchiving
and the subsequent sample append is smart enough). However, during
crash recovery, we sometimes treat series as "freshly unarchived"
without directly appending a sample. We might add more cases of that
type later, so better deal with archiving properly and load chunkDescs
if required.
2015-01-08 07:10:31 -08:00
|
|
|
// locked the fingerprint of the memorySeries. This method will panic if this
|
|
|
|
// series has no chunk descriptors.
|
2016-09-21 14:44:27 -07:00
|
|
|
func (s *memorySeries) head() *chunk.Desc {
|
2014-06-06 02:55:53 -07:00
|
|
|
return s.chunkDescs[len(s.chunkDescs)-1]
|
|
|
|
}
|
|
|
|
|
2016-02-19 09:16:41 -08:00
|
|
|
// firstTime returns the timestamp of the first sample in the series.
|
|
|
|
//
|
|
|
|
// The caller must have locked the fingerprint of the memorySeries.
|
2016-09-26 04:06:06 -07:00
|
|
|
func (s *memorySeries) firstTime() model.Time {
|
2014-11-05 11:02:45 -08:00
|
|
|
if s.chunkDescsOffset == 0 && len(s.chunkDescs) > 0 {
|
2016-09-21 14:44:27 -07:00
|
|
|
return s.chunkDescs[0].FirstTime()
|
2014-11-05 11:02:45 -08:00
|
|
|
}
|
|
|
|
return s.savedFirstTime
|
2014-09-15 10:24:26 -07:00
|
|
|
}
|
|
|
|
|
2016-02-19 09:16:41 -08:00
|
|
|
// lastSamplePair returns the last ingested SamplePair. It returns
|
2016-09-28 14:40:26 -07:00
|
|
|
// model.ZeroSamplePair if this memorySeries has never received a sample (via the add
|
2016-02-19 09:16:41 -08:00
|
|
|
// method), which is the case for freshly unarchived series or newly created
|
|
|
|
// ones and also for all series after a server restart. However, in that case,
|
|
|
|
// series will most likely be considered stale anyway.
|
|
|
|
//
|
|
|
|
// The caller must have locked the fingerprint of the memorySeries.
|
|
|
|
func (s *memorySeries) lastSamplePair() model.SamplePair {
|
|
|
|
if !s.lastSampleValueSet {
|
2016-09-28 14:40:26 -07:00
|
|
|
return model.ZeroSamplePair
|
2016-02-19 09:16:41 -08:00
|
|
|
}
|
|
|
|
return model.SamplePair{
|
|
|
|
Timestamp: s.lastTime,
|
|
|
|
Value: s.lastSampleValue,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-05-20 10:13:06 -07:00
|
|
|
// chunksToPersist returns a slice of chunkDescs eligible for persistence. It's
|
|
|
|
// the caller's responsibility to actually persist the returned chunks
|
|
|
|
// afterwards. The method sets the persistWatermark and the dirty flag
|
|
|
|
// accordingly.
|
2015-03-08 18:33:10 -07:00
|
|
|
//
|
|
|
|
// The caller must have locked the fingerprint of the series.
|
2016-09-21 14:44:27 -07:00
|
|
|
func (s *memorySeries) chunksToPersist() []*chunk.Desc {
|
2015-03-08 18:33:10 -07:00
|
|
|
newWatermark := len(s.chunkDescs)
|
|
|
|
if !s.headChunkClosed {
|
|
|
|
newWatermark--
|
|
|
|
}
|
|
|
|
if newWatermark == s.persistWatermark {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
cds := s.chunkDescs[s.persistWatermark:newWatermark]
|
|
|
|
s.dirty = true
|
|
|
|
s.persistWatermark = newWatermark
|
|
|
|
return cds
|
|
|
|
}
|
|
|
|
|
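chunksToPersist above hands out exactly the slice between the old persistWatermark and the new one (skipping a still-open head chunk) and then advances the watermark, so repeated calls never return the same chunk twice. A toy sketch of that bookkeeping over a plain string slice; the series type here is a stand-in, not the real memorySeries.

package main

import "fmt"

// series is a minimal stand-in holding chunk labels and a persist watermark.
type series struct {
	chunks           []string
	persistWatermark int
	headChunkClosed  bool
	dirty            bool
}

// chunksToPersist returns the chunks that became eligible for persistence
// since the last call and advances the watermark, mirroring the method above.
func (s *series) chunksToPersist() []string {
	newWatermark := len(s.chunks)
	if !s.headChunkClosed {
		newWatermark-- // never hand out the still-open head chunk
	}
	if newWatermark == s.persistWatermark {
		return nil // nothing new since the last call
	}
	cds := s.chunks[s.persistWatermark:newWatermark]
	s.dirty = true
	s.persistWatermark = newWatermark
	return cds
}

func main() {
	s := &series{chunks: []string{"c0", "c1", "c2"}}
	fmt.Println(s.chunksToPersist()) // [c0 c1] -- c2 is the open head chunk
	fmt.Println(s.chunksToPersist()) // []
}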
2014-10-14 04:52:39 -07:00
|
|
|
// memorySeriesIterator implements SeriesIterator.
|
|
|
|
type memorySeriesIterator struct {
|
2016-09-21 14:44:27 -07:00
|
|
|
// Last chunk.Iterator used by ValueAtOrBeforeTime.
|
|
|
|
chunkIt chunk.Iterator
|
2016-07-11 11:27:25 -07:00
|
|
|
// Caches chunkIterators.
|
2016-09-21 14:44:27 -07:00
|
|
|
chunkIts []chunk.Iterator
|
2016-07-11 11:27:25 -07:00
|
|
|
// The actual sample chunks.
|
2016-09-21 14:44:27 -07:00
|
|
|
chunks []chunk.Chunk
|
2016-07-11 11:27:25 -07:00
|
|
|
// Call to quarantine the series this iterator belongs to.
|
|
|
|
quarantine func(error)
|
|
|
|
// The metric corresponding to the iterator.
|
|
|
|
metric model.Metric
|
|
|
|
// Chunks that were pinned for this iterator.
|
2016-09-21 14:44:27 -07:00
|
|
|
pinnedChunkDescs []*chunk.Desc
|
2016-07-11 11:27:25 -07:00
|
|
|
// Where to send evict requests when unpinning pinned chunks.
|
2016-09-21 14:44:27 -07:00
|
|
|
evictRequests chan<- chunk.EvictRequest
|
2014-10-14 04:52:39 -07:00
|
|
|
}
|
|
|
|
|
2016-02-16 09:47:50 -08:00
|
|
|
// ValueAtOrBeforeTime implements SeriesIterator.
|
|
|
|
func (it *memorySeriesIterator) ValueAtOrBeforeTime(t model.Time) model.SamplePair {
|
2014-06-06 02:55:53 -07:00
|
|
|
// The most common case. We are iterating through a chunk.
|
2016-02-25 03:23:42 -08:00
|
|
|
if it.chunkIt != nil {
|
2016-09-21 08:56:55 -07:00
|
|
|
containsT, err := it.chunkIt.Contains(t)
|
2016-02-25 03:23:42 -08:00
|
|
|
if err != nil {
|
|
|
|
it.quarantine(err)
|
2016-09-28 14:40:26 -07:00
|
|
|
return model.ZeroSamplePair
|
2016-02-25 03:23:42 -08:00
|
|
|
}
|
|
|
|
if containsT {
|
2016-09-21 08:56:55 -07:00
|
|
|
if it.chunkIt.FindAtOrBefore(t) {
|
|
|
|
return it.chunkIt.Value()
|
2016-02-25 03:23:42 -08:00
|
|
|
}
|
2016-09-21 08:56:55 -07:00
|
|
|
if it.chunkIt.Err() != nil {
|
|
|
|
it.quarantine(it.chunkIt.Err())
|
2016-03-09 07:20:39 -08:00
|
|
|
}
|
2016-09-28 14:40:26 -07:00
|
|
|
return model.ZeroSamplePair
|
2016-02-25 03:23:42 -08:00
|
|
|
}
|
2014-06-06 02:55:53 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
if len(it.chunks) == 0 {
|
2016-09-28 14:40:26 -07:00
|
|
|
return model.ZeroSamplePair
|
2014-06-06 02:55:53 -07:00
|
|
|
}
|
|
|
|
|
2016-09-21 08:56:55 -07:00
|
|
|
// Find the last chunk where FirstTime() is before or equal to t.
|
2015-04-14 04:46:38 -07:00
|
|
|
l := len(it.chunks) - 1
|
2014-06-06 02:55:53 -07:00
|
|
|
i := sort.Search(len(it.chunks), func(i int) bool {
|
2016-09-21 08:56:55 -07:00
|
|
|
return !it.chunks[l-i].FirstTime().After(t)
|
2014-06-06 02:55:53 -07:00
|
|
|
})
|
|
|
|
if i == len(it.chunks) {
|
2016-02-16 09:47:50 -08:00
|
|
|
// Even the first chunk starts after t.
|
2016-09-28 14:40:26 -07:00
|
|
|
return model.ZeroSamplePair
|
2014-06-06 02:55:53 -07:00
|
|
|
}
|
2015-05-20 10:13:06 -07:00
|
|
|
it.chunkIt = it.chunkIterator(l - i)
|
2016-09-21 08:56:55 -07:00
|
|
|
if it.chunkIt.FindAtOrBefore(t) {
|
|
|
|
return it.chunkIt.Value()
|
2016-03-09 07:20:39 -08:00
|
|
|
}
|
2016-09-21 08:56:55 -07:00
|
|
|
if it.chunkIt.Err() != nil {
|
|
|
|
it.quarantine(it.chunkIt.Err())
|
2016-02-25 03:23:42 -08:00
|
|
|
}
|
2016-09-28 14:40:26 -07:00
|
|
|
return model.ZeroSamplePair
|
2014-06-06 02:55:53 -07:00
|
|
|
}
|
|
|
|
|
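ValueAtOrBeforeTime needs the last chunk whose first sample is at or before t, but sort.Search can only locate the first index at which a predicate becomes true, so the code above indexes the ascending chunk slice with l-i to search it back to front. A sketch of that reversal over plain timestamps; lastStartingAtOrBefore and startTimes are illustrative names, not from the code.

package main

import (
	"fmt"
	"sort"
)

// lastStartingAtOrBefore returns the index of the last chunk whose start time
// is <= t, or -1 if every chunk starts after t. sort.Search finds the first
// index satisfying a predicate, so we walk the slice back to front via l-i,
// exactly like the iterator above.
func lastStartingAtOrBefore(startTimes []int64, t int64) int {
	l := len(startTimes) - 1
	i := sort.Search(len(startTimes), func(i int) bool {
		return startTimes[l-i] <= t
	})
	if i == len(startTimes) {
		return -1 // even the first chunk starts after t
	}
	return l - i
}

func main() {
	starts := []int64{0, 100, 200, 300}
	fmt.Println(lastStartingAtOrBefore(starts, 250)) // 2
	fmt.Println(lastStartingAtOrBefore(starts, -10)) // -1
}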
2015-05-20 10:13:06 -07:00
|
|
|
// RangeValues implements SeriesIterator.
|
2015-08-22 05:52:35 -07:00
|
|
|
func (it *memorySeriesIterator) RangeValues(in metric.Interval) []model.SamplePair {
|
2015-04-14 04:46:38 -07:00
|
|
|
// Find the first chunk for which the first sample is within the interval.
|
2014-06-06 02:55:53 -07:00
|
|
|
i := sort.Search(len(it.chunks), func(i int) bool {
|
2016-09-21 08:56:55 -07:00
|
|
|
return !it.chunks[i].FirstTime().Before(in.OldestInclusive)
|
2014-06-06 02:55:53 -07:00
|
|
|
})
|
2015-04-14 04:46:38 -07:00
|
|
|
// Only now check the last timestamp of the previous chunk (which is
|
|
|
|
// fairly expensive).
|
2016-02-25 03:23:42 -08:00
|
|
|
if i > 0 {
|
2016-09-21 08:56:55 -07:00
|
|
|
lt, err := it.chunkIterator(i - 1).LastTimestamp()
|
2016-02-25 03:23:42 -08:00
|
|
|
if err != nil {
|
|
|
|
it.quarantine(err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if !lt.Before(in.OldestInclusive) {
|
|
|
|
i--
|
|
|
|
}
|
2015-04-14 04:46:38 -07:00
|
|
|
}
|
|
|
|
|
2015-08-22 05:52:35 -07:00
|
|
|
values := []model.SamplePair{}
|
2015-05-04 11:16:01 -07:00
|
|
|
for j, c := range it.chunks[i:] {
|
2016-09-21 08:56:55 -07:00
|
|
|
if c.FirstTime().After(in.NewestInclusive) {
|
2014-06-06 02:55:53 -07:00
|
|
|
break
|
|
|
|
}
|
2016-09-21 14:44:27 -07:00
|
|
|
chValues, err := chunk.RangeValues(it.chunkIterator(i+j), in)
|
2016-02-25 03:23:42 -08:00
|
|
|
if err != nil {
|
|
|
|
it.quarantine(err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
values = append(values, chValues...)
|
2014-06-06 02:55:53 -07:00
|
|
|
}
|
|
|
|
return values
|
|
|
|
}
|
2014-10-14 04:52:39 -07:00
|
|
|
|
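RangeValues above first locates the first chunk whose first sample lies inside the interval and only then checks the previous chunk's last timestamp (the comparatively expensive call) to decide whether to step back by one chunk. A sketch of that step-back check, assuming non-overlapping chunks and using hypothetical firstTimes/lastTimes slices in place of chunk iterators.

package main

import (
	"fmt"
	"sort"
)

// firstRelevantChunk mirrors the start of RangeValues above: find the first
// chunk whose first sample is inside the interval, then check whether the
// previous chunk still reaches into it (its last sample is >= oldest) and, if
// so, step back by one.
func firstRelevantChunk(firstTimes, lastTimes []int64, oldest int64) int {
	i := sort.Search(len(firstTimes), func(i int) bool {
		return firstTimes[i] >= oldest
	})
	if i > 0 && lastTimes[i-1] >= oldest {
		i-- // the previous chunk overlaps the interval, too
	}
	return i
}

func main() {
	first := []int64{0, 100, 200}
	last := []int64{90, 190, 290}
	fmt.Println(firstRelevantChunk(first, last, 110)) // 1: chunk 1 (100..190) reaches past 110
	fmt.Println(firstRelevantChunk(first, last, 195)) // 2: chunk 1 ends at 190, before 195
}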
2016-07-11 11:27:25 -07:00
|
|
|
func (it *memorySeriesIterator) Metric() metric.Metric {
|
|
|
|
return metric.Metric{Metric: it.metric}
|
|
|
|
}
|
|
|
|
|
2016-09-21 14:44:27 -07:00
|
|
|
// chunkIterator returns the chunk.Iterator for the chunk at position i (and
|
2015-05-04 11:16:01 -07:00
|
|
|
// creates it if needed).
|
2016-09-21 14:44:27 -07:00
|
|
|
func (it *memorySeriesIterator) chunkIterator(i int) chunk.Iterator {
|
2015-05-04 11:16:01 -07:00
|
|
|
chunkIt := it.chunkIts[i]
|
|
|
|
if chunkIt == nil {
|
2016-09-21 08:56:55 -07:00
|
|
|
chunkIt = it.chunks[i].NewIterator()
|
2015-05-04 11:16:01 -07:00
|
|
|
it.chunkIts[i] = chunkIt
|
|
|
|
}
|
|
|
|
return chunkIt
|
|
|
|
}
|
|
|
|
|
2016-07-11 11:27:25 -07:00
|
|
|
func (it *memorySeriesIterator) Close() {
|
|
|
|
for _, cd := range it.pinnedChunkDescs {
|
2016-09-21 14:44:27 -07:00
|
|
|
cd.Unpin(it.evictRequests)
|
2016-07-11 11:27:25 -07:00
|
|
|
}
|
2016-09-28 14:33:34 -07:00
|
|
|
chunk.Ops.WithLabelValues(chunk.Unpin).Add(float64(len(it.pinnedChunkDescs)))
|
2016-07-11 11:27:25 -07:00
|
|
|
}
|
|
|
|
|
2016-02-19 09:35:30 -08:00
|
|
|
// singleSampleSeriesIterator implements SeriesIterator. It is a "shortcut
|
2016-07-11 11:27:25 -07:00
|
|
|
// iterator" that returns a single sample only. The sample is saved in the
|
2016-02-19 09:35:30 -08:00
|
|
|
// iterator itself, so no chunks need to be pinned.
|
|
|
|
type singleSampleSeriesIterator struct {
|
|
|
|
samplePair model.SamplePair
|
2016-07-11 11:27:25 -07:00
|
|
|
metric model.Metric
|
2016-02-19 09:35:30 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
// ValueAtOrBeforeTime implements SeriesIterator.
|
|
|
|
func (it *singleSampleSeriesIterator) ValueAtOrBeforeTime(t model.Time) model.SamplePair {
|
|
|
|
if it.samplePair.Timestamp.After(t) {
|
2016-09-28 14:40:26 -07:00
|
|
|
return model.ZeroSamplePair
|
2016-02-19 09:35:30 -08:00
|
|
|
}
|
|
|
|
return it.samplePair
|
|
|
|
}
|
|
|
|
|
|
|
|
// RangeValues implements SeriesIterator.
|
|
|
|
func (it *singleSampleSeriesIterator) RangeValues(in metric.Interval) []model.SamplePair {
|
|
|
|
if it.samplePair.Timestamp.After(in.NewestInclusive) ||
|
|
|
|
it.samplePair.Timestamp.Before(in.OldestInclusive) {
|
|
|
|
return []model.SamplePair{}
|
|
|
|
}
|
|
|
|
return []model.SamplePair{it.samplePair}
|
|
|
|
}
|
|
|
|
|
2016-07-11 11:27:25 -07:00
|
|
|
func (it *singleSampleSeriesIterator) Metric() metric.Metric {
|
|
|
|
return metric.Metric{Metric: it.metric}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Close implements SeriesIterator.
|
|
|
|
func (it *singleSampleSeriesIterator) Close() {}
|
|
|
|
|
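Because singleSampleSeriesIterator keeps its one sample inline, ValueAtOrBeforeTime and RangeValues reduce to plain timestamp comparisons with no chunks to pin or unpin. A compact sketch of that behaviour with a stand-in type; singleSample is illustrative, not the real iterator.

package main

import "fmt"

// singleSample is a stand-in for the single-sample shortcut iterator above:
// one timestamp/value pair held inline, no chunks involved.
type singleSample struct {
	ts  int64
	val float64
}

// valueAtOrBefore returns the sample if it is not newer than t.
func (s singleSample) valueAtOrBefore(t int64) (float64, bool) {
	if s.ts > t {
		return 0, false // the only sample is after t: nothing to return
	}
	return s.val, true
}

// rangeValues returns the sample only if it falls inside [oldest, newest].
func (s singleSample) rangeValues(oldest, newest int64) []float64 {
	if s.ts < oldest || s.ts > newest {
		return nil
	}
	return []float64{s.val}
}

func main() {
	s := singleSample{ts: 100, val: 1.5}
	fmt.Println(s.valueAtOrBefore(90))  // 0 false
	fmt.Println(s.rangeValues(50, 150)) // [1.5]
}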
2014-10-14 04:52:39 -07:00
|
|
|
// nopSeriesIterator implements SeriesIterator. It never returns any values.
|
|
|
|
type nopSeriesIterator struct{}
|
|
|
|
|
2015-05-20 10:13:06 -07:00
|
|
|
// ValueAtOrBeforeTime implements SeriesIterator.
|
2016-02-16 09:47:50 -08:00
|
|
|
func (i nopSeriesIterator) ValueAtOrBeforeTime(t model.Time) model.SamplePair {
	return model.ZeroSamplePair
}
// RangeValues implements SeriesIterator.
func (i nopSeriesIterator) RangeValues(in metric.Interval) []model.SamplePair {
	return []model.SamplePair{}
}
// Metric implements SeriesIterator.
func (i nopSeriesIterator) Metric() metric.Metric {
	return metric.Metric{}
}
// Close implements SeriesIterator.
func (i nopSeriesIterator) Close() {}
var nopIter nopSeriesIterator // A nopSeriesIterator for convenience. Can be shared.
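
// The function below is a hypothetical illustration, not part of the
// package's actual API: it sketches why the shared nopIter is convenient.
// Point queries against it yield the zero sample pair, range queries yield
// an empty (non-nil) slice, and Close is a no-op, so callers can return
// nopIter instead of nil whenever a series has no data for a query and
// never have to special-case a missing iterator.
func exampleNopIterUsage() {
	var it SeriesIterator = nopIter

	// Point query: always the zero sample pair.
	_ = it.ValueAtOrBeforeTime(model.Now()) // == model.ZeroSamplePair

	// Range query: always empty, never nil.
	vals := it.RangeValues(metric.Interval{
		OldestInclusive: model.Earliest,
		NewestInclusive: model.Latest,
	})
	_ = len(vals) // 0

	// Closing is a no-op as well.
	it.Close()
}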