prometheus/promql/engine.go


// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promql
import (
"container/heap"
"context"
"fmt"
"math"
"runtime"
"sort"
"strconv"
"sync"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
opentracing "github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/timestamp"
"github.com/prometheus/prometheus/pkg/value"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/stats"
)
const (
namespace = "prometheus"
subsystem = "engine"
queryTag = "query"
// The largest SampleValue that can be converted to an int64 without overflow.
maxInt64 = 9223372036854774784
// The smallest SampleValue that can be converted to an int64 without underflow.
minInt64 = -9223372036854775808
)
type engineMetrics struct {
currentQueries prometheus.Gauge
maxConcurrentQueries prometheus.Gauge
queryQueueTime prometheus.Summary
queryPrepareTime prometheus.Summary
queryInnerEval prometheus.Summary
queryResultAppend prometheus.Summary
queryResultSort prometheus.Summary
}
// convertibleToInt64 returns true if v does not over-/underflow an int64.
func convertibleToInt64(v float64) bool {
return v <= maxInt64 && v >= minInt64
}
type (
// ErrQueryTimeout is returned if a query timed out during processing.
ErrQueryTimeout string
// ErrQueryCanceled is returned if a query was canceled during processing.
ErrQueryCanceled string
// ErrStorage is returned if an error was encountered in the storage layer
// during query handling.
ErrStorage error
)
func (e ErrQueryTimeout) Error() string { return fmt.Sprintf("query timed out in %s", string(e)) }
func (e ErrQueryCanceled) Error() string { return fmt.Sprintf("query was canceled in %s", string(e)) }
// A Query is derived from a raw query string and can be run against an engine
// it is associated with.
type Query interface {
// Exec processes the query and returns the result.
Exec(ctx context.Context) *Result
// Statement returns the parsed statement of the query.
Statement() Statement
// Stats returns statistics about the lifetime of the query.
Stats() *stats.TimerGroup
// Cancel signals that a running query execution should be aborted.
Cancel()
}
// query implements the Query interface.
type query struct {
// Underlying data provider.
queryable storage.Queryable
// The original query string.
q string
// Statement of the parsed query.
stmt Statement
// Timer stats for the query execution.
stats *stats.TimerGroup
// Cancellation function for the query.
cancel func()
// The engine against which the query is executed.
ng *Engine
}
// Statement implements the Query interface.
func (q *query) Statement() Statement {
return q.stmt
}
// Stats implements the Query interface.
func (q *query) Stats() *stats.TimerGroup {
return q.stats
}
// Cancel implements the Query interface.
func (q *query) Cancel() {
if q.cancel != nil {
q.cancel()
}
}
// Exec implements the Query interface.
func (q *query) Exec(ctx context.Context) *Result {
if span := opentracing.SpanFromContext(ctx); span != nil {
span.SetTag(queryTag, q.stmt.String())
}
res, err := q.ng.exec(ctx, q)
return &Result{Err: err, Value: res}
}
// contextDone returns an error if the context was canceled or timed out.
func contextDone(ctx context.Context, env string) error {
select {
case <-ctx.Done():
err := ctx.Err()
switch err {
case context.Canceled:
return ErrQueryCanceled(env)
case context.DeadlineExceeded:
return ErrQueryTimeout(env)
default:
return err
}
default:
return nil
}
}
// Engine handles the lifetime of queries from beginning to end.
// It is connected to a querier.
type Engine struct {
logger log.Logger
metrics *engineMetrics
timeout time.Duration
gate *queryGate
}
// NewEngine returns a new engine.
func NewEngine(logger log.Logger, reg prometheus.Registerer, maxConcurrent int, timeout time.Duration) *Engine {
if logger == nil {
logger = log.NewNopLogger()
}
metrics := &engineMetrics{
currentQueries: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "queries",
Help: "The current number of queries being executed or waiting.",
}),
maxConcurrentQueries: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "queries_concurrent_max",
Help: "The max number of concurrent queries.",
}),
queryQueueTime: prometheus.NewSummary(prometheus.SummaryOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "query_duration_seconds",
Help: "Query timings",
ConstLabels: prometheus.Labels{"slice": "queue_time"},
}),
queryPrepareTime: prometheus.NewSummary(prometheus.SummaryOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "query_duration_seconds",
Help: "Query timings",
ConstLabels: prometheus.Labels{"slice": "prepare_time"},
}),
queryInnerEval: prometheus.NewSummary(prometheus.SummaryOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "query_duration_seconds",
Help: "Query timings",
ConstLabels: prometheus.Labels{"slice": "inner_eval"},
}),
queryResultAppend: prometheus.NewSummary(prometheus.SummaryOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "query_duration_seconds",
Help: "Query timings",
ConstLabels: prometheus.Labels{"slice": "result_append"},
}),
queryResultSort: prometheus.NewSummary(prometheus.SummaryOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "query_duration_seconds",
Help: "Query timings",
ConstLabels: prometheus.Labels{"slice": "result_sort"},
}),
}
metrics.maxConcurrentQueries.Set(float64(maxConcurrent))
if reg != nil {
reg.MustRegister(
metrics.currentQueries,
metrics.maxConcurrentQueries,
metrics.queryInnerEval,
metrics.queryPrepareTime,
metrics.queryResultAppend,
metrics.queryResultSort,
)
}
return &Engine{
gate: newQueryGate(maxConcurrent),
timeout: timeout,
logger: logger,
metrics: metrics,
}
}
// NewInstantQuery returns an evaluation query for the given expression at the given time.
func (ng *Engine) NewInstantQuery(q storage.Queryable, qs string, ts time.Time) (Query, error) {
expr, err := ParseExpr(qs)
if err != nil {
return nil, err
}
qry := ng.newQuery(q, expr, ts, ts, 0)
qry.q = qs
return qry, nil
}
// NewRangeQuery returns an evaluation query for the given time range and with
// the resolution set by the interval.
func (ng *Engine) NewRangeQuery(q storage.Queryable, qs string, start, end time.Time, interval time.Duration) (Query, error) {
expr, err := ParseExpr(qs)
if err != nil {
return nil, err
}
if expr.Type() != ValueTypeVector && expr.Type() != ValueTypeScalar {
return nil, fmt.Errorf("invalid expression type %q for range query, must be Scalar or instant Vector", documentedType(expr.Type()))
}
qry := ng.newQuery(q, expr, start, end, interval)
qry.q = qs
return qry, nil
}
func (ng *Engine) newQuery(q storage.Queryable, expr Expr, start, end time.Time, interval time.Duration) *query {
es := &EvalStmt{
Expr: expr,
Start: start,
End: end,
Interval: interval,
}
qry := &query{
stmt: es,
ng: ng,
stats: stats.NewTimerGroup(),
queryable: q,
}
return qry
}
// testStmt is an internal helper statement that allows execution
// of an arbitrary function during handling. It is used to test the Engine.
type testStmt func(context.Context) error
func (testStmt) String() string { return "test statement" }
func (testStmt) stmt() {}
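// newTestQuery returns a query whose execution simply invokes f. It is only used to test the engine.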
func (ng *Engine) newTestQuery(f func(context.Context) error) Query {
qry := &query{
q: "test statement",
stmt: testStmt(f),
ng: ng,
stats: stats.NewTimerGroup(),
}
return qry
}
// exec executes the query.
//
// At this point per query only one EvalStmt is evaluated. Alert and record
// statements are not handled by the Engine.
func (ng *Engine) exec(ctx context.Context, q *query) (Value, error) {
ng.metrics.currentQueries.Inc()
defer ng.metrics.currentQueries.Dec()
ctx, cancel := context.WithTimeout(ctx, ng.timeout)
q.cancel = cancel
execTimer := q.stats.GetTimer(stats.ExecTotalTime).Start()
defer execTimer.Stop()
queueTimer := q.stats.GetTimer(stats.ExecQueueTime).Start()
if err := ng.gate.Start(ctx); err != nil {
return nil, err
}
defer ng.gate.Done()
queueTimer.Stop()
ng.metrics.queryQueueTime.Observe(queueTimer.ElapsedTime().Seconds())
// Cancel when execution is done or an error was raised.
defer q.cancel()
const env = "query execution"
evalTimer := q.stats.GetTimer(stats.EvalTotalTime).Start()
defer evalTimer.Stop()
// The base context might already be canceled on the first iteration (e.g. during shutdown).
if err := contextDone(ctx, env); err != nil {
return nil, err
}
switch s := q.Statement().(type) {
case *EvalStmt:
return ng.execEvalStmt(ctx, q, s)
case testStmt:
return nil, s(ctx)
}
panic(fmt.Errorf("promql.Engine.exec: unhandled statement of type %T", q.Statement()))
}
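// timeMilliseconds converts a time.Time to a Unix timestamp in milliseconds.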
func timeMilliseconds(t time.Time) int64 {
return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
}
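// durationMilliseconds converts a duration to an integral number of milliseconds.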
func durationMilliseconds(d time.Duration) int64 {
return int64(d / (time.Millisecond / time.Nanosecond))
}
// execEvalStmt evaluates the expression of an evaluation statement for the given time range.
func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *EvalStmt) (Value, error) {
prepareTimer := query.stats.GetTimer(stats.QueryPreparationTime).Start()
querier, err := ng.populateIterators(ctx, query.queryable, s)
prepareTimer.Stop()
ng.metrics.queryPrepareTime.Observe(prepareTimer.ElapsedTime().Seconds())
// XXX(fabxc): the querier returned by populateIterators might be instantiated even if an error is returned;
// we must not return without closing it, irrespective of the error.
// TODO: make this semantically saner.
if querier != nil {
defer querier.Close()
}
if err != nil {
return nil, err
}
evalTimer := query.stats.GetTimer(stats.InnerEvalTime).Start()
// Instant evaluation.
if s.Start == s.End && s.Interval == 0 {
start := timeMilliseconds(s.Start)
evaluator := &evaluator{
Timestamp: start,
ctx: ctx,
logger: ng.logger,
}
val, err := evaluator.Eval(s.Expr)
if err != nil {
return nil, err
}
evalTimer.Stop()
ng.metrics.queryInnerEval.Observe(evalTimer.ElapsedTime().Seconds())
// Point might have a different timestamp, force it to the evaluation
// timestamp as that is when we ran the evaluation.
switch v := val.(type) {
case Scalar:
v.T = start
case Vector:
for i := range v {
v[i].Point.T = start
}
}
return val, nil
}
numSteps := int(s.End.Sub(s.Start) / s.Interval)
// Range evaluation.
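// Seriess accumulates, keyed by the series' label hash, the points produced at each evaluation step.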
Seriess := map[uint64]Series{}
for ts := s.Start; !ts.After(s.End); ts = ts.Add(s.Interval) {
if err := contextDone(ctx, "range evaluation"); err != nil {
return nil, err
}
t := timeMilliseconds(ts)
evaluator := &evaluator{
Timestamp: t,
ctx: ctx,
logger: ng.logger,
}
val, err := evaluator.Eval(s.Expr)
if err != nil {
return nil, err
}
switch v := val.(type) {
case Scalar:
// As the expression type does not change we can safely default to 0
// as the fingerprint for Scalar expressions.
ss, ok := Seriess[0]
if !ok {
ss = Series{Points: make([]Point, 0, numSteps)}
Seriess[0] = ss
}
ss.Points = append(ss.Points, Point{V: v.V, T: t})
Seriess[0] = ss
case Vector:
for _, sample := range v {
h := sample.Metric.Hash()
ss, ok := Seriess[h]
if !ok {
ss = Series{
Metric: sample.Metric,
Points: make([]Point, 0, numSteps),
}
Seriess[h] = ss
}
sample.Point.T = t
ss.Points = append(ss.Points, sample.Point)
Seriess[h] = ss
}
default:
panic(fmt.Errorf("promql.Engine.exec: invalid expression type %q", val.Type()))
}
}
evalTimer.Stop()
ng.metrics.queryInnerEval.Observe(evalTimer.ElapsedTime().Seconds())
if err := contextDone(ctx, "expression evaluation"); err != nil {
return nil, err
}
appendTimer := query.stats.GetTimer(stats.ResultAppendTime).Start()
mat := Matrix{}
for _, ss := range Seriess {
mat = append(mat, ss)
}
appendTimer.Stop()
ng.metrics.queryResultAppend.Observe(appendTimer.ElapsedTime().Seconds())
if err := contextDone(ctx, "expression evaluation"); err != nil {
return nil, err
}
// TODO(fabxc): order ensured by storage?
// TODO(fabxc): where to ensure metric labels are a copy from the storage internals.
sortTimer := query.stats.GetTimer(stats.ResultSortTime).Start()
sort.Sort(mat)
sortTimer.Stop()
ng.metrics.queryResultSort.Observe(sortTimer.ElapsedTime().Seconds())
return mat, nil
}
func (ng *Engine) populateIterators(ctx context.Context, q storage.Queryable, s *EvalStmt) (storage.Querier, error) {
var maxOffset time.Duration
Inspect(s.Expr, func(node Node, _ []Node) bool {
switch n := node.(type) {
case *VectorSelector:
if maxOffset < LookbackDelta {
maxOffset = LookbackDelta
}
if n.Offset+LookbackDelta > maxOffset {
maxOffset = n.Offset + LookbackDelta
}
case *MatrixSelector:
if maxOffset < n.Range {
maxOffset = n.Range
}
if n.Offset+n.Range > maxOffset {
maxOffset = n.Offset + n.Range
}
}
return true
})
mint := s.Start.Add(-maxOffset)
querier, err := q.Querier(ctx, timestamp.FromTime(mint), timestamp.FromTime(s.End))
if err != nil {
return nil, err
}
Inspect(s.Expr, func(node Node, path []Node) bool {
var set storage.SeriesSet
params := &storage.SelectParams{
Step: int64(s.Interval / time.Millisecond),
}
switch n := node.(type) {
case *VectorSelector:
params.Func = extractFuncFromPath(path)
set, err = querier.Select(params, n.LabelMatchers...)
if err != nil {
level.Error(ng.logger).Log("msg", "error selecting series set", "err", err)
return false
}
n.series, err = expandSeriesSet(set)
if err != nil {
// TODO(fabxc): use multi-error.
level.Error(ng.logger).Log("msg", "error expanding series set", "err", err)
return false
}
for _, s := range n.series {
it := storage.NewBuffer(s.Iterator(), durationMilliseconds(LookbackDelta))
n.iterators = append(n.iterators, it)
}
case *MatrixSelector:
params.Func = extractFuncFromPath(path)
set, err = querier.Select(params, n.LabelMatchers...)
if err != nil {
level.Error(ng.logger).Log("msg", "error selecting series set", "err", err)
return false
}
n.series, err = expandSeriesSet(set)
if err != nil {
level.Error(ng.logger).Log("msg", "error expanding series set", "err", err)
return false
}
for _, s := range n.series {
it := storage.NewBuffer(s.Iterator(), durationMilliseconds(n.Range))
n.iterators = append(n.iterators, it)
}
}
return true
})
return querier, err
}
// extractFuncFromPath walks up the path and searches for the first instance of
// a function or aggregation.
func extractFuncFromPath(p []Node) string {
if len(p) == 0 {
return ""
}
switch n := p[len(p)-1].(type) {
case *AggregateExpr:
return n.Op.String()
case *Call:
return n.Func.Name
case *BinaryExpr:
// If we hit a binary expression we terminate since we only care about functions
// or aggregations over a single metric.
return ""
}
return extractFuncFromPath(p[:len(p)-1])
}
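// expandSeriesSet drains a SeriesSet into a slice and returns any error encountered while iterating.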
func expandSeriesSet(it storage.SeriesSet) (res []storage.Series, err error) {
for it.Next() {
res = append(res, it.At())
}
return res, it.Err()
}
// An evaluator evaluates given expressions at a fixed timestamp. It is attached to an
// engine through which it connects to a querier and reports errors. On timeout or
// cancellation of its context it terminates.
type evaluator struct {
ctx context.Context
Timestamp int64 // time in milliseconds
finalizers []func()
logger log.Logger
}
func (ev *evaluator) close() {
for _, f := range ev.finalizers {
f()
}
}
// errorf causes a panic with the input formatted into an error.
func (ev *evaluator) errorf(format string, args ...interface{}) {
ev.error(fmt.Errorf(format, args...))
}
// error causes a panic with the given error.
func (ev *evaluator) error(err error) {
panic(err)
}
// recover is the handler that turns panics into returns from the top level of evaluation.
func (ev *evaluator) recover(errp *error) {
e := recover()
if e == nil {
return
}
if _, ok := e.(runtime.Error); ok {
// Print the stack trace but do not inhibit the running application.
buf := make([]byte, 64<<10)
buf = buf[:runtime.Stack(buf, false)]
level.Error(ev.logger).Log("msg", "runtime panic in parser", "err", e, "stacktrace", string(buf))
*errp = fmt.Errorf("unexpected error")
} else {
*errp = e.(error)
}
}
// evalScalar attempts to evaluate e to a Scalar value and errors otherwise.
func (ev *evaluator) evalScalar(e Expr) Scalar {
val := ev.eval(e)
sv, ok := val.(Scalar)
if !ok {
ev.errorf("expected Scalar but got %s", documentedType(val.Type()))
}
return sv
}
// evalVector attempts to evaluate e to a Vector value and errors otherwise.
func (ev *evaluator) evalVector(e Expr) Vector {
val := ev.eval(e)
vec, ok := val.(Vector)
if !ok {
ev.errorf("expected instant Vector but got %s", documentedType(val.Type()))
}
return vec
}
// evalInt attempts to evaluate e into an integer and errors otherwise.
func (ev *evaluator) evalInt(e Expr) int64 {
sc := ev.evalScalar(e)
if !convertibleToInt64(sc.V) {
ev.errorf("Scalar value %v overflows int64", sc.V)
}
return int64(sc.V)
}
// evalFloat attempts to evaluate e into a float and errors otherwise.
func (ev *evaluator) evalFloat(e Expr) float64 {
sc := ev.evalScalar(e)
return float64(sc.V)
}
// evalMatrix attempts to evaluate e into a Matrix and errors otherwise.
// The error message uses the term "range Vector" to match the user facing
// documentation.
func (ev *evaluator) evalMatrix(e Expr) Matrix {
val := ev.eval(e)
mat, ok := val.(Matrix)
if !ok {
ev.errorf("expected range Vector but got %s", documentedType(val.Type()))
}
return mat
}
// evalString attempts to evaluate e to a string value and errors otherwise.
func (ev *evaluator) evalString(e Expr) String {
val := ev.eval(e)
sv, ok := val.(String)
if !ok {
ev.errorf("expected string but got %s", documentedType(val.Type()))
}
return sv
}
// evalOneOf evaluates e and errors unless the result is of one of the given types.
func (ev *evaluator) evalOneOf(e Expr, t1, t2 ValueType) Value {
val := ev.eval(e)
if val.Type() != t1 && val.Type() != t2 {
ev.errorf("expected %s or %s but got %s", documentedType(t1), documentedType(t2), documentedType(val.Type()))
}
return val
}
func (ev *evaluator) Eval(expr Expr) (v Value, err error) {
defer ev.recover(&err)
defer ev.close()
return ev.eval(expr), nil
}
// eval evaluates the given expression as the given AST expression node requires.
func (ev *evaluator) eval(expr Expr) Value {
// This is the top-level evaluation method.
// Thus, we check for timeout/cancellation here.
if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
ev.error(err)
}
switch e := expr.(type) {
case *AggregateExpr:
Vector := ev.evalVector(e.Expr)
return ev.aggregation(e.Op, e.Grouping, e.Without, e.Param, Vector)
case *BinaryExpr:
lhs := ev.evalOneOf(e.LHS, ValueTypeScalar, ValueTypeVector)
rhs := ev.evalOneOf(e.RHS, ValueTypeScalar, ValueTypeVector)
switch lt, rt := lhs.Type(), rhs.Type(); {
case lt == ValueTypeScalar && rt == ValueTypeScalar:
return Scalar{
V: scalarBinop(e.Op, lhs.(Scalar).V, rhs.(Scalar).V),
T: ev.Timestamp,
}
case lt == ValueTypeVector && rt == ValueTypeVector:
switch e.Op {
case itemLAND:
return ev.VectorAnd(lhs.(Vector), rhs.(Vector), e.VectorMatching)
case itemLOR:
return ev.VectorOr(lhs.(Vector), rhs.(Vector), e.VectorMatching)
case itemLUnless:
return ev.VectorUnless(lhs.(Vector), rhs.(Vector), e.VectorMatching)
default:
return ev.VectorBinop(e.Op, lhs.(Vector), rhs.(Vector), e.VectorMatching, e.ReturnBool)
}
case lt == ValueTypeVector && rt == ValueTypeScalar:
return ev.VectorscalarBinop(e.Op, lhs.(Vector), rhs.(Scalar), false, e.ReturnBool)
case lt == ValueTypeScalar && rt == ValueTypeVector:
return ev.VectorscalarBinop(e.Op, rhs.(Vector), lhs.(Scalar), true, e.ReturnBool)
}
case *Call:
return e.Func.Call(ev, e.Args)
case *MatrixSelector:
return ev.matrixSelector(e)
case *NumberLiteral:
return Scalar{V: e.Val, T: ev.Timestamp}
case *ParenExpr:
return ev.eval(e.Expr)
case *StringLiteral:
return String{V: e.Val, T: ev.Timestamp}
case *UnaryExpr:
se := ev.evalOneOf(e.Expr, ValueTypeScalar, ValueTypeVector)
// Only + and - are possible operators.
if e.Op == itemSUB {
switch v := se.(type) {
case Scalar:
v.V = -v.V
case Vector:
for i, sv := range v {
v[i].V = -sv.V
}
}
}
return se
case *VectorSelector:
return ev.vectorSelector(e)
}
panic(fmt.Errorf("unhandled expression of type: %T", expr))
}
// vectorSelector evaluates a *VectorSelector expression.
func (ev *evaluator) vectorSelector(node *VectorSelector) Vector {
var (
vec = make(Vector, 0, len(node.series))
refTime = ev.Timestamp - durationMilliseconds(node.Offset)
)
for i, it := range node.iterators {
var t int64
var v float64
ok := it.Seek(refTime)
if !ok {
if it.Err() != nil {
ev.error(it.Err())
}
}
if ok {
t, v = it.Values()
}
peek := 1
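// If seeking failed or the sample found lies after the reference time, step back to the latest
// sample at or before refTime; skip the series if that sample is older than the lookback window.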
if !ok || t > refTime {
t, v, ok = it.PeekBack(peek)
peek++
if !ok || t < refTime-durationMilliseconds(LookbackDelta) {
continue
}
}
if value.IsStaleNaN(v) {
continue
}
vec = append(vec, Sample{
Metric: node.series[i].Labels(),
Point: Point{V: v, T: t},
})
}
return vec
}
var pointPool = sync.Pool{}
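// getPointSlice returns a point slice from the pool, or a newly allocated slice with capacity sz if the pool is empty.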
func getPointSlice(sz int) []Point {
p := pointPool.Get()
if p != nil {
return p.([]Point)
}
return make([]Point, 0, sz)
}
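// putPointSlice resets the slice's length to zero and returns it to the pool for reuse.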
func putPointSlice(p []Point) {
pointPool.Put(p[:0])
}
var matrixPool = sync.Pool{}
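// getMatrix returns a Matrix from the pool, or a newly allocated one with capacity sz if the pool is empty.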
func getMatrix(sz int) Matrix {
m := matrixPool.Get()
if m != nil {
return m.(Matrix)
}
return make(Matrix, 0, sz)
}
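// putMatrix resets the Matrix's length to zero and returns it to the pool for reuse.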
func putMatrix(m Matrix) {
matrixPool.Put(m[:0])
}
// matrixSelector evaluates a *MatrixSelector expression.
func (ev *evaluator) matrixSelector(node *MatrixSelector) Matrix {
var (
offset = durationMilliseconds(node.Offset)
maxt = ev.Timestamp - offset
mint = maxt - durationMilliseconds(node.Range)
matrix = getMatrix(len(node.series))
// Write all points into a single slice to avoid lots of tiny allocations.
allPoints = getPointSlice(5 * len(matrix))
)
ev.finalizers = append(ev.finalizers,
func() { putPointSlice(allPoints) },
func() { putMatrix(matrix) },
)
for i, it := range node.iterators {
start := len(allPoints)
ss := Series{
Metric: node.series[i].Labels(),
}
ok := it.Seek(maxt)
if !ok {
if it.Err() != nil {
ev.error(it.Err())
}
}
buf := it.Buffer()
for buf.Next() {
t, v := buf.At()
if value.IsStaleNaN(v) {
continue
}
// Values in the buffer are guaranteed to be smaller than maxt.
if t >= mint {
allPoints = append(allPoints, Point{T: t, V: v})
}
}
// The seeked sample might also be in the range.
if ok {
t, v := it.Values()
if t == maxt && !value.IsStaleNaN(v) {
allPoints = append(allPoints, Point{T: t, V: v})
}
}
ss.Points = allPoints[start:]
if len(ss.Points) > 0 {
matrix = append(matrix, ss)
}
}
return matrix
}
func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *VectorMatching) Vector {
if matching.Card != CardManyToMany {
panic("set operations must only use many-to-many matching")
}
sigf := signatureFunc(matching.On, matching.MatchingLabels...)
var result Vector
// The set of signatures for the right-hand side Vector.
rightSigs := map[uint64]struct{}{}
// Add all rhs samples to a map so we can easily find matches later.
for _, rs := range rhs {
rightSigs[sigf(rs.Metric)] = struct{}{}
}
for _, ls := range lhs {
// If there's a matching entry in the right-hand side Vector, add the sample.
if _, ok := rightSigs[sigf(ls.Metric)]; ok {
result = append(result, ls)
}
}
return result
}
func (ev *evaluator) VectorOr(lhs, rhs Vector, matching *VectorMatching) Vector {
if matching.Card != CardManyToMany {
panic("set operations must only use many-to-many matching")
}
sigf := signatureFunc(matching.On, matching.MatchingLabels...)
var result Vector
leftSigs := map[uint64]struct{}{}
// Add everything from the left-hand-side Vector.
for _, ls := range lhs {
leftSigs[sigf(ls.Metric)] = struct{}{}
result = append(result, ls)
}
// Add all right-hand side elements which have not been added from the left-hand side.
for _, rs := range rhs {
if _, ok := leftSigs[sigf(rs.Metric)]; !ok {
result = append(result, rs)
}
}
return result
}
func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *VectorMatching) Vector {
if matching.Card != CardManyToMany {
panic("set operations must only use many-to-many matching")
}
sigf := signatureFunc(matching.On, matching.MatchingLabels...)
rightSigs := map[uint64]struct{}{}
for _, rs := range rhs {
rightSigs[sigf(rs.Metric)] = struct{}{}
}
var result Vector
for _, ls := range lhs {
if _, ok := rightSigs[sigf(ls.Metric)]; !ok {
result = append(result, ls)
}
}
return result
}
// VectorBinop evaluates a binary operation between two Vectors, excluding set operators.
func (ev *evaluator) VectorBinop(op ItemType, lhs, rhs Vector, matching *VectorMatching, returnBool bool) Vector {
if matching.Card == CardManyToMany {
panic("many-to-many only allowed for set operators")
}
var (
result = Vector{}
sigf = signatureFunc(matching.On, matching.MatchingLabels...)
)
// The control flow below handles one-to-one or many-to-one matching.
// For one-to-many, swap sidedness and account for the swap when calculating
// values.
if matching.Card == CardOneToMany {
lhs, rhs = rhs, lhs
}
// All samples from the rhs hashed by the matching label/values.
rightSigs := map[uint64]Sample{}
// Add all rhs samples to a map so we can easily find matches later.
for _, rs := range rhs {
sig := sigf(rs.Metric)
// The rhs is guaranteed to be the 'one' side. Having multiple samples
// with the same signature means that the matching is many-to-many.
if _, found := rightSigs[sig]; found {
// Many-to-many matching not allowed.
ev.errorf("many-to-many matching not allowed: matching labels must be unique on one side")
}
rightSigs[sig] = rs
}
// Tracks the match-signature. For one-to-one operations the value is nil. For many-to-one
// the value is a set of signatures to detect duplicated result elements.
matchedSigs := map[uint64]map[uint64]struct{}{}
// For all lhs samples find a respective rhs sample and perform
// the binary operation.
for _, ls := range lhs {
sig := sigf(ls.Metric)
rs, found := rightSigs[sig] // Look for a match in the rhs Vector.
if !found {
continue
}
// Account for potentially swapped sidedness.
vl, vr := ls.V, rs.V
if matching.Card == CardOneToMany {
vl, vr = vr, vl
}
value, keep := vectorElemBinop(op, vl, vr)
if returnBool {
if keep {
value = 1.0
} else {
value = 0.0
}
} else if !keep {
continue
}
metric := resultMetric(ls.Metric, rs.Metric, op, matching)
insertedSigs, exists := matchedSigs[sig]
if matching.Card == CardOneToOne {
if exists {
ev.errorf("multiple matches for labels: many-to-one matching must be explicit (group_left/group_right)")
}
matchedSigs[sig] = nil // Set existence to true.
} else {
// In many-to-one matching the grouping labels have to ensure a unique metric
// for the result Vector. Check whether those labels have already been added for
// the same matching labels.
insertSig := metric.Hash()
if !exists {
insertedSigs = map[uint64]struct{}{}
matchedSigs[sig] = insertedSigs
} else if _, duplicate := insertedSigs[insertSig]; duplicate {
ev.errorf("multiple matches for labels: grouping labels must ensure unique matches")
}
insertedSigs[insertSig] = struct{}{}
}
result = append(result, Sample{
Metric: metric,
Point: Point{V: value, T: ev.Timestamp},
})
}
return result
}
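// hashWithoutLabels returns a hash of the label set, excluding the given label names and the metric name.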
func hashWithoutLabels(lset labels.Labels, names ...string) uint64 {
cm := make(labels.Labels, 0, len(lset))
Outer:
for _, l := range lset {
for _, n := range names {
if n == l.Name {
continue Outer
}
}
if l.Name == labels.MetricName {
continue
}
cm = append(cm, l)
}
return cm.Hash()
}
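// hashForLabels returns a hash computed only from the labels with the given names.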
func hashForLabels(lset labels.Labels, names ...string) uint64 {
cm := make(labels.Labels, 0, len(names))
for _, l := range lset {
for _, n := range names {
if l.Name == n {
cm = append(cm, l)
break
}
}
}
return cm.Hash()
}
// signatureFunc returns a function that calculates the signature for a metric
// ignoring the provided label names. If on is true, only the given label names are used instead.
func signatureFunc(on bool, names ...string) func(labels.Labels) uint64 {
// TODO(fabxc): ensure names are sorted and then use that and sortedness
// of labels by names to speed up the operations below.
// Alternatively, inline the hashing and don't build new label sets.
if on {
return func(lset labels.Labels) uint64 { return hashForLabels(lset, names...) }
}
return func(lset labels.Labels) uint64 { return hashWithoutLabels(lset, names...) }
}
// resultMetric returns the metric for the given sample(s) based on the Vector
// binary operation and the matching options.
func resultMetric(lhs, rhs labels.Labels, op ItemType, matching *VectorMatching) labels.Labels {
lb := labels.NewBuilder(lhs)
if shouldDropMetricName(op) {
lb.Del(labels.MetricName)
}
if matching.Card == CardOneToOne {
if matching.On {
Outer:
for _, l := range lhs {
for _, n := range matching.MatchingLabels {
if l.Name == n {
continue Outer
}
}
lb.Del(l.Name)
}
} else {
lb.Del(matching.MatchingLabels...)
}
}
for _, ln := range matching.Include {
// Included labels from the `group_x` modifier are taken from the "one"-side.
if v := rhs.Get(ln); v != "" {
lb.Set(ln, v)
} else {
lb.Del(ln)
}
}
return lb.Labels()
}
// VectorscalarBinop evaluates a binary operation between a Vector and a Scalar.
func (ev *evaluator) VectorscalarBinop(op ItemType, lhs Vector, rhs Scalar, swap, returnBool bool) Vector {
vec := make(Vector, 0, len(lhs))
for _, lhsSample := range lhs {
lv, rv := lhsSample.V, rhs.V
// lhs always contains the Vector. If the original position was different
// swap for calculating the value.
if swap {
lv, rv = rv, lv
}
value, keep := vectorElemBinop(op, lv, rv)
if returnBool {
if keep {
value = 1.0
} else {
value = 0.0
}
keep = true
}
if keep {
lhsSample.V = value
if shouldDropMetricName(op) || returnBool {
lhsSample.Metric = dropMetricName(lhsSample.Metric)
}
vec = append(vec, lhsSample)
}
}
return vec
}
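// dropMetricName returns a copy of the label set with the __name__ label removed.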
func dropMetricName(l labels.Labels) labels.Labels {
return labels.NewBuilder(l).Del(labels.MetricName).Labels()
}
// scalarBinop evaluates a binary operation between two Scalars.
func scalarBinop(op ItemType, lhs, rhs float64) float64 {
switch op {
case itemADD:
return lhs + rhs
case itemSUB:
return lhs - rhs
case itemMUL:
return lhs * rhs
case itemDIV:
return lhs / rhs
case itemPOW:
return math.Pow(float64(lhs), float64(rhs))
case itemMOD:
return math.Mod(float64(lhs), float64(rhs))
case itemEQL:
return btos(lhs == rhs)
case itemNEQ:
return btos(lhs != rhs)
case itemGTR:
return btos(lhs > rhs)
case itemLSS:
return btos(lhs < rhs)
case itemGTE:
return btos(lhs >= rhs)
case itemLTE:
return btos(lhs <= rhs)
}
panic(fmt.Errorf("operator %q not allowed for Scalar operations", op))
}
// vectorElemBinop evaluates a binary operation between two Vector elements.
func vectorElemBinop(op ItemType, lhs, rhs float64) (float64, bool) {
switch op {
case itemADD:
return lhs + rhs, true
case itemSUB:
return lhs - rhs, true
case itemMUL:
return lhs * rhs, true
case itemDIV:
return lhs / rhs, true
case itemPOW:
return math.Pow(float64(lhs), float64(rhs)), true
case itemMOD:
return math.Mod(float64(lhs), float64(rhs)), true
case itemEQL:
return lhs, lhs == rhs
case itemNEQ:
return lhs, lhs != rhs
case itemGTR:
return lhs, lhs > rhs
case itemLSS:
return lhs, lhs < rhs
case itemGTE:
return lhs, lhs >= rhs
case itemLTE:
return lhs, lhs <= rhs
}
panic(fmt.Errorf("operator %q not allowed for operations between Vectors", op))
}
// intersection returns the metric of common label/value pairs of two input metrics.
func intersection(ls1, ls2 labels.Labels) labels.Labels {
res := make(labels.Labels, 0, 5)
for _, l1 := range ls1 {
for _, l2 := range ls2 {
if l1.Name == l2.Name && l1.Value == l2.Value {
res = append(res, l1)
continue
}
}
}
return res
}
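// groupedAggregation holds the per-group state accumulated while evaluating an aggregation expression.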
type groupedAggregation struct {
labels labels.Labels
value float64
valuesSquaredSum float64
groupCount int
heap vectorByValueHeap
reverseHeap vectorByReverseValueHeap
}
// aggregation evaluates an aggregation operation on a Vector.
func (ev *evaluator) aggregation(op ItemType, grouping []string, without bool, param Expr, vec Vector) Vector {
result := map[uint64]*groupedAggregation{}
var k int64
if op == itemTopK || op == itemBottomK {
k = ev.evalInt(param)
if k < 1 {
return Vector{}
}
}
var q float64
if op == itemQuantile {
q = ev.evalFloat(param)
}
var valueLabel string
if op == itemCountValues {
valueLabel = ev.evalString(param).V
if !without {
grouping = append(grouping, valueLabel)
}
}
for _, s := range vec {
lb := labels.NewBuilder(s.Metric)
if without {
lb.Del(grouping...)
lb.Del(labels.MetricName)
}
if op == itemCountValues {
lb.Set(valueLabel, strconv.FormatFloat(float64(s.V), 'f', -1, 64))
}
var (
groupingKey uint64
metric = lb.Labels()
)
if without {
groupingKey = metric.Hash()
} else {
groupingKey = hashForLabels(metric, grouping...)
}
group, ok := result[groupingKey]
// Add a new group if it doesn't exist.
if !ok {
var m labels.Labels
if without {
m = metric
} else {
m = make(labels.Labels, 0, len(grouping))
for _, l := range metric {
for _, n := range grouping {
if l.Name == n {
m = append(m, labels.Label{Name: n, Value: l.Value})
break
}
}
}
sort.Sort(m)
}
result[groupingKey] = &groupedAggregation{
labels: m,
value: s.V,
valuesSquaredSum: s.V * s.V,
groupCount: 1,
}
input_vec_len := int64(len(vec))
result_size := k
if k > input_vec_len {
result_size = input_vec_len
}
if op == itemTopK || op == itemQuantile {
result[groupingKey].heap = make(vectorByValueHeap, 0, result_size)
heap.Push(&result[groupingKey].heap, &Sample{
Point: Point{V: s.V},
Metric: s.Metric,
})
} else if op == itemBottomK {
result[groupingKey].reverseHeap = make(vectorByReverseValueHeap, 0, result_size)
heap.Push(&result[groupingKey].reverseHeap, &Sample{
Point: Point{V: s.V},
Metric: s.Metric,
})
}
continue
}
switch op {
case itemSum:
group.value += s.V
case itemAvg:
group.value += s.V
group.groupCount++
case itemMax:
if group.value < s.V || math.IsNaN(float64(group.value)) {
group.value = s.V
}
case itemMin:
if group.value > s.V || math.IsNaN(float64(group.value)) {
group.value = s.V
}
case itemCount, itemCountValues:
group.groupCount++
case itemStdvar, itemStddev:
group.value += s.V
group.valuesSquaredSum += s.V * s.V
group.groupCount++
case itemTopK:
if int64(len(group.heap)) < k || group.heap[0].V < s.V || math.IsNaN(float64(group.heap[0].V)) {
if int64(len(group.heap)) == k {
heap.Pop(&group.heap)
}
heap.Push(&group.heap, &Sample{
Point: Point{V: s.V},
Metric: s.Metric,
})
}
case itemBottomK:
if int64(len(group.reverseHeap)) < k || group.reverseHeap[0].V > s.V || math.IsNaN(float64(group.reverseHeap[0].V)) {
if int64(len(group.reverseHeap)) == k {
heap.Pop(&group.reverseHeap)
}
heap.Push(&group.reverseHeap, &Sample{
Point: Point{V: s.V},
Metric: s.Metric,
})
}
case itemQuantile:
group.heap = append(group.heap, s)
default:
panic(fmt.Errorf("expected aggregation operator but got %q", op))
}
}
// Construct the result Vector from the aggregated groups.
resultVector := make(Vector, 0, len(result))
for _, aggr := range result {
switch op {
case itemAvg:
aggr.value = aggr.value / float64(aggr.groupCount)
case itemCount, itemCountValues:
aggr.value = float64(aggr.groupCount)
case itemStdvar:
avg := float64(aggr.value) / float64(aggr.groupCount)
aggr.value = float64(aggr.valuesSquaredSum)/float64(aggr.groupCount) - avg*avg
case itemStddev:
avg := float64(aggr.value) / float64(aggr.groupCount)
aggr.value = math.Sqrt(float64(aggr.valuesSquaredSum)/float64(aggr.groupCount) - avg*avg)
case itemTopK:
// The heap keeps the lowest value on top, so reverse it.
sort.Sort(sort.Reverse(aggr.heap))
for _, v := range aggr.heap {
resultVector = append(resultVector, Sample{
Metric: v.Metric,
Point: Point{V: v.V, T: ev.Timestamp},
})
}
continue // Bypass default append.
case itemBottomK:
// The reverse heap keeps the highest value on top, so reverse it to list the smallest values first.
sort.Sort(sort.Reverse(aggr.reverseHeap))
for _, v := range aggr.reverseHeap {
resultVector = append(resultVector, Sample{
Metric: v.Metric,
Point: Point{V: v.V, T: ev.Timestamp},
})
}
continue // Bypass default append.
case itemQuantile:
aggr.value = quantile(q, aggr.heap)
default:
// For other aggregations, we already have the right value.
}
resultVector = append(resultVector, Sample{
Metric: aggr.labels,
Point: Point{V: aggr.value, T: ev.Timestamp},
})
}
return resultVector
}
// btos returns 1 if b is true, 0 otherwise.
func btos(b bool) float64 {
if b {
return 1
}
return 0
}
// shouldDropMetricName returns whether the metric name should be dropped in the
// result of the op operation.
func shouldDropMetricName(op ItemType) bool {
switch op {
case itemADD, itemSUB, itemDIV, itemMUL, itemMOD:
return true
default:
return false
}
}
// LookbackDelta determines the time since the last sample after which a time
// series is considered stale.
var LookbackDelta = 5 * time.Minute
// A queryGate controls the maximum number of concurrently running and waiting queries.
type queryGate struct {
ch chan struct{}
}
// newQueryGate returns a query gate that limits the number of queries
// being concurrently executed.
func newQueryGate(length int) *queryGate {
return &queryGate{
ch: make(chan struct{}, length),
}
}
// Start blocks until the gate has a free spot or the context is done.
func (g *queryGate) Start(ctx context.Context) error {
select {
case <-ctx.Done():
return contextDone(ctx, "query queue")
case g.ch <- struct{}{}:
return nil
}
}
// Done releases a single spot in the gate.
func (g *queryGate) Done() {
select {
case <-g.ch:
default:
panic("engine.queryGate.Done: more operations done than started")
}
}
// documentedType translates the internal type to the equivalent
// user-facing terminology as defined in the documentation.
func documentedType(t ValueType) string {
switch t {
case "vector":
return "instant vector"
case "matrix":
return "range vector"
default:
return string(t)
}
}