// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package promql

import (
	"errors"
	"time"

	"github.com/prometheus/common/model"
	"golang.org/x/net/context"

	"github.com/prometheus/prometheus/storage/local"
)

// An Analyzer traverses an expression and determines which data has to be requested
// from the storage. It is bound to a context that allows cancellation and timing out.
type Analyzer struct {
	// The storage from which to query data.
	Storage local.Storage
	// The expression being analyzed.
	Expr Expr
	// The time range for evaluation of Expr.
	Start, End model.Time

	// The preload times for different query time offsets.
	offsetPreloadTimes map[time.Duration]preloadTimes
}

// preloadTimes tracks which instants or ranges to preload for a set of
// fingerprints. One of these structs is collected for each offset by the query
// analyzer.
type preloadTimes struct {
	// Instants require single samples to be loaded along the entire query
	// range, with intervals between the samples corresponding to the query
	// resolution.
	instants map[model.Fingerprint]struct{}
	// Ranges require loading a range of samples at each resolution step,
	// stretching backwards from the current evaluation timestamp. The length of
	// the range into the past is given by the duration, as in "foo[5m]".
	ranges map[model.Fingerprint]time.Duration
}
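
// Illustration (an assumed example, not part of the original file): for a
// query such as
//
//	sum(rate(foo[5m])) / count(foo offset 10m)
//
// the analyzer collects one preloadTimes entry per offset. The fingerprints
// matched by foo[5m] go into ranges with a duration of 5m under offset 0,
// while the fingerprints matched by foo offset 10m go into instants under the
// 10m offset, since only one sample per resolution step is needed there.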

// Analyze the provided expression and attach metrics and fingerprints to data-selecting
// AST nodes that are later used to preload the data from the storage.
func (a *Analyzer) Analyze(ctx context.Context) error {
	a.offsetPreloadTimes = map[time.Duration]preloadTimes{}

	getPreloadTimes := func(offset time.Duration) preloadTimes {
		if _, ok := a.offsetPreloadTimes[offset]; !ok {
			a.offsetPreloadTimes[offset] = preloadTimes{
				instants: map[model.Fingerprint]struct{}{},
				ranges:   map[model.Fingerprint]time.Duration{},
			}
		}
		return a.offsetPreloadTimes[offset]
	}

	// Retrieve fingerprints and metrics for the required time range for
	// each metric or matrix selector node.
	Inspect(a.Expr, func(node Node) bool {
		switch n := node.(type) {
		case *VectorSelector:
			n.metrics = a.Storage.MetricsForLabelMatchers(n.LabelMatchers...)
			n.iterators = make(map[model.Fingerprint]local.SeriesIterator, len(n.metrics))

			pt := getPreloadTimes(n.Offset)
			for fp := range n.metrics {
				// Only add the fingerprint to the instants if not yet present in the
				// ranges. Ranges always contain more points and span more time than
				// instants for the same offset.
				if _, alreadyInRanges := pt.ranges[fp]; !alreadyInRanges {
					pt.instants[fp] = struct{}{}
				}
			}
		case *MatrixSelector:
			n.metrics = a.Storage.MetricsForLabelMatchers(n.LabelMatchers...)
			n.iterators = make(map[model.Fingerprint]local.SeriesIterator, len(n.metrics))

			pt := getPreloadTimes(n.Offset)
			for fp := range n.metrics {
				if pt.ranges[fp] < n.Range {
					pt.ranges[fp] = n.Range
					// Delete the fingerprint from the instants. Ranges always contain more
					// points and span more time than instants, so we don't need to track
					// an instant for the same fingerprint, should we have one.
					delete(pt.instants, fp)
				}
			}
		}
		return true
	})

	// Currently we do not return an error but we might place a context check in here
	// or extend the stage in some other way.
	return nil
}

// Prepare the expression evaluation by preloading all required chunks from the storage
// and setting the respective storage iterators in the AST nodes.
func (a *Analyzer) Prepare(ctx context.Context) (local.Preloader, error) {
	const env = "query preparation"

	if a.offsetPreloadTimes == nil {
		return nil, errors.New("analysis must be performed before preparing query")
	}
	var err error
	// The preloader must not be closed unless an error occurred as closing
	// unpins the preloaded chunks.
	p := a.Storage.NewPreloader()
	defer func() {
		if err != nil {
			p.Close()
		}
	}()

	// Preload all analyzed ranges.
	iters := map[time.Duration]map[model.Fingerprint]local.SeriesIterator{}
	for offset, pt := range a.offsetPreloadTimes {
		itersForDuration := map[model.Fingerprint]local.SeriesIterator{}
		iters[offset] = itersForDuration
		start := a.Start.Add(-offset)
		end := a.End.Add(-offset)
		for fp, rangeDuration := range pt.ranges {
			if err = contextDone(ctx, env); err != nil {
				return nil, err
			}
			startOfRange := start.Add(-rangeDuration)
			if StalenessDelta > rangeDuration {
				// Cover a weird corner case: The expression
				// mixes up instants and ranges for the same
				// series. We handle that overall as a
				// range. But if the rangeDuration is smaller
				// than the StalenessDelta, the range wouldn't
				// cover everything potentially needed for the
				// instant, so we have to extend startOfRange.
				startOfRange = start.Add(-StalenessDelta)
			}
			// Assign to the enclosing err so the deferred Close above
			// releases the preloaded chunks if preloading fails here.
			var iter local.SeriesIterator
			iter, err = p.PreloadRange(fp, startOfRange, end)
			if err != nil {
				return nil, err
			}
			itersForDuration[fp] = iter
		}
		for fp := range pt.instants {
			if err = contextDone(ctx, env); err != nil {
				return nil, err
			}
			// Need to look backwards by StalenessDelta but not
			// forward because we always return the closest sample
			// _before_ the reference time.
			// As above, assign to the enclosing err so the deferred
			// Close fires if preloading fails.
			var iter local.SeriesIterator
			iter, err = p.PreloadRange(fp, start.Add(-StalenessDelta), end)
			if err != nil {
				return nil, err
			}
			itersForDuration[fp] = iter
		}
	}

	// Attach storage iterators to AST nodes.
	Inspect(a.Expr, func(node Node) bool {
		switch n := node.(type) {
		case *VectorSelector:
			for fp := range n.metrics {
				n.iterators[fp] = iters[n.Offset][fp]
			}
		case *MatrixSelector:
			for fp := range n.metrics {
				n.iterators[fp] = iters[n.Offset][fp]
			}
		}
		return true
	})

	return p, nil
}
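
// The sketch below shows how the two phases are typically combined by a
// caller; it is an illustrative example under assumed usage (the helper name
// is hypothetical), not part of the original analyzer API. Analyze collects
// the preload times per offset, Prepare pins the required chunks and returns
// the Preloader, and closing that Preloader unpins the chunks again.
func analyzeAndPrepare(ctx context.Context, storage local.Storage, expr Expr, start, end model.Time) (local.Preloader, error) {
	analyzer := &Analyzer{
		Storage: storage,
		Expr:    expr,
		Start:   start,
		End:     end,
	}
	if err := analyzer.Analyze(ctx); err != nil {
		return nil, err
	}
	// The caller must Close the returned Preloader after evaluating the
	// expression so that the pinned chunks are released.
	return analyzer.Prepare(ctx)
}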