// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package promql

import (
	"container/heap"
	"fmt"
	"math"
	"runtime"
	"sort"
	"time"

	opentracing "github.com/opentracing/opentracing-go"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/log"
	"github.com/prometheus/common/model"
	"golang.org/x/net/context"

	"github.com/prometheus/prometheus/storage/local"
	"github.com/prometheus/prometheus/storage/metric"
	"github.com/prometheus/prometheus/util/stats"
)

const (
	namespace = "prometheus"
	subsystem = "engine"
	queryTag  = "query"

	// The largest SampleValue that can be converted to an int64 without overflow.
	maxInt64 model.SampleValue = 9223372036854774784
	// The smallest SampleValue that can be converted to an int64 without underflow.
	minInt64 model.SampleValue = -9223372036854775808
)
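
// Note: maxInt64 above is 2^63 - 1024 rather than 2^63 - 1 because
// model.SampleValue is a float64, and 2^63 - 1024 is the largest float64
// value that is still strictly below 2^63 and therefore convertible to an
// int64 without overflow.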

var (
	currentQueries = prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "queries",
		Help:      "The current number of queries being executed or waiting.",
	})
	maxConcurrentQueries = prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "queries_concurrent_max",
		Help:      "The max number of concurrent queries.",
	})
	queryPrepareTime = prometheus.NewSummary(
		prometheus.SummaryOpts{
			Namespace:   namespace,
			Subsystem:   subsystem,
			Name:        "query_duration_seconds",
			Help:        "Query timings",
			ConstLabels: prometheus.Labels{"slice": "prepare_time"},
		},
	)
	queryInnerEval = prometheus.NewSummary(
		prometheus.SummaryOpts{
			Namespace:   namespace,
			Subsystem:   subsystem,
			Name:        "query_duration_seconds",
			Help:        "Query timings",
			ConstLabels: prometheus.Labels{"slice": "inner_eval"},
		},
	)
	queryResultAppend = prometheus.NewSummary(
		prometheus.SummaryOpts{
			Namespace:   namespace,
			Subsystem:   subsystem,
			Name:        "query_duration_seconds",
			Help:        "Query timings",
			ConstLabels: prometheus.Labels{"slice": "result_append"},
		},
	)
	queryResultSort = prometheus.NewSummary(
		prometheus.SummaryOpts{
			Namespace:   namespace,
			Subsystem:   subsystem,
			Name:        "query_duration_seconds",
			Help:        "Query timings",
			ConstLabels: prometheus.Labels{"slice": "result_sort"},
		},
	)
)

func init() {
	prometheus.MustRegister(currentQueries)
	prometheus.MustRegister(maxConcurrentQueries)
	prometheus.MustRegister(queryPrepareTime)
	prometheus.MustRegister(queryInnerEval)
	prometheus.MustRegister(queryResultAppend)
	prometheus.MustRegister(queryResultSort)
}

// convertibleToInt64 returns true if v does not over-/underflow an int64.
func convertibleToInt64(v model.SampleValue) bool {
	return v <= maxInt64 && v >= minInt64
}

// sampleStream is a stream of Values belonging to an attached COWMetric.
type sampleStream struct {
	Metric metric.Metric
	Values []model.SamplePair
}

// sample is a single sample belonging to a COWMetric.
type sample struct {
	Metric    metric.Metric
	Value     model.SampleValue
	Timestamp model.Time
}

// vector is basically only an alias for model.Samples, but the
// contract is that in a vector, all samples have the same timestamp.
type vector []*sample

func (vector) Type() model.ValueType { return model.ValVector }
func (vec vector) String() string    { return vec.value().String() }

func (vec vector) value() model.Vector {
	val := make(model.Vector, len(vec))
	for i, s := range vec {
		val[i] = &model.Sample{
			Metric:    s.Metric.Copy().Metric,
			Value:     s.Value,
			Timestamp: s.Timestamp,
		}
	}
	return val
}

// matrix is a slice of sampleStreams that implements sort.Interface and
// has a String method.
type matrix []*sampleStream

func (matrix) Type() model.ValueType { return model.ValMatrix }
func (mat matrix) String() string    { return mat.value().String() }

func (mat matrix) value() model.Matrix {
	val := make(model.Matrix, len(mat))
	for i, ss := range mat {
		val[i] = &model.SampleStream{
			Metric: ss.Metric.Copy().Metric,
			Values: ss.Values,
		}
	}
	return val
}

// Result holds the resulting value of an execution or an error
// if any occurred.
type Result struct {
	Err   error
	Value model.Value
}

// Vector returns a vector if the result value is one. An error is returned if
// the result was an error or the result value is not a vector.
func (r *Result) Vector() (model.Vector, error) {
	if r.Err != nil {
		return nil, r.Err
	}
	v, ok := r.Value.(model.Vector)
	if !ok {
		return nil, fmt.Errorf("query result is not a vector")
	}
	return v, nil
}

// Matrix returns a matrix. An error is returned if
// the result was an error or the result value is not a matrix.
func (r *Result) Matrix() (model.Matrix, error) {
	if r.Err != nil {
		return nil, r.Err
	}
	v, ok := r.Value.(model.Matrix)
	if !ok {
		return nil, fmt.Errorf("query result is not a range vector")
	}
	return v, nil
}

// Scalar returns a scalar value. An error is returned if
// the result was an error or the result value is not a scalar.
func (r *Result) Scalar() (*model.Scalar, error) {
	if r.Err != nil {
		return nil, r.Err
	}
	v, ok := r.Value.(*model.Scalar)
	if !ok {
		return nil, fmt.Errorf("query result is not a scalar")
	}
	return v, nil
}

func (r *Result) String() string {
	if r.Err != nil {
		return r.Err.Error()
	}
	if r.Value == nil {
		return ""
	}
	return r.Value.String()
}
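
// A minimal sketch of how a caller might consume a Result (illustrative only;
// "qry" and "ctx" are assumed to exist in the calling code):
//
//	res := qry.Exec(ctx)
//	if res.Err != nil {
//		// handle the error
//	}
//	switch v := res.Value.(type) {
//	case model.Vector:
//		_ = v // instant-query result
//	case model.Matrix:
//		_ = v // range-query result
//	case *model.Scalar:
//		_ = v // scalar result
//	}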

type (
	// ErrQueryTimeout is returned if a query timed out during processing.
	ErrQueryTimeout string
	// ErrQueryCanceled is returned if a query was canceled during processing.
	ErrQueryCanceled string
	// ErrStorage is returned if an error was encountered in the storage layer
	// during query handling.
	ErrStorage error
)

func (e ErrQueryTimeout) Error() string  { return fmt.Sprintf("query timed out in %s", string(e)) }
func (e ErrQueryCanceled) Error() string { return fmt.Sprintf("query was canceled in %s", string(e)) }

// A Query is derived from a raw query string and can be run against an engine
// it is associated with.
type Query interface {
	// Exec processes the query and returns the result.
	Exec(ctx context.Context) *Result
	// Statement returns the parsed statement of the query.
	Statement() Statement
	// Stats returns statistics about the lifetime of the query.
	Stats() *stats.TimerGroup
	// Cancel signals that a running query execution should be aborted.
	Cancel()
}

// query implements the Query interface.
type query struct {
	// The original query string.
	q string
	// Statement of the parsed query.
	stmt Statement
	// Timer stats for the query execution.
	stats *stats.TimerGroup
	// Cancellation function for the query.
	cancel func()

	// The engine against which the query is executed.
	ng *Engine
}

// Statement implements the Query interface.
func (q *query) Statement() Statement {
	return q.stmt
}

// Stats implements the Query interface.
func (q *query) Stats() *stats.TimerGroup {
	return q.stats
}

// Cancel implements the Query interface.
func (q *query) Cancel() {
	if q.cancel != nil {
		q.cancel()
	}
}

// Exec implements the Query interface.
func (q *query) Exec(ctx context.Context) *Result {
	if span := opentracing.SpanFromContext(ctx); span != nil {
		span.SetTag(queryTag, q.stmt.String())
	}

	res, err := q.ng.exec(ctx, q)
	return &Result{Err: err, Value: res}
}

// contextDone returns an error if the context was canceled or timed out.
func contextDone(ctx context.Context, env string) error {
	select {
	case <-ctx.Done():
		err := ctx.Err()
		switch err {
		case context.Canceled:
			return ErrQueryCanceled(env)
		case context.DeadlineExceeded:
			return ErrQueryTimeout(env)
		default:
			return err
		}
	default:
		return nil
	}
}

// Engine handles the lifetime of queries from beginning to end.
// It is connected to a querier.
type Engine struct {
	// A Querier constructor against an underlying storage.
	queryable Queryable
	// The gate limiting the maximum number of concurrent and waiting queries.
	gate    *queryGate
	options *EngineOptions
}

// Queryable allows opening a storage querier.
type Queryable interface {
	Querier() (local.Querier, error)
}

// NewEngine returns a new engine.
func NewEngine(queryable Queryable, o *EngineOptions) *Engine {
	if o == nil {
		o = DefaultEngineOptions
	}
	maxConcurrentQueries.Set(float64(o.MaxConcurrentQueries))
	return &Engine{
		queryable: queryable,
		gate:      newQueryGate(o.MaxConcurrentQueries),
		options:   o,
	}
}

// EngineOptions contains configuration parameters for an Engine.
type EngineOptions struct {
	MaxConcurrentQueries int
	Timeout              time.Duration
}

// DefaultEngineOptions are the default engine options.
var DefaultEngineOptions = &EngineOptions{
	MaxConcurrentQueries: 20,
	Timeout:              2 * time.Minute,
}
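
// A minimal usage sketch (illustrative only; it assumes a Queryable
// implementation called "storage" and elides error handling):
//
//	engine := NewEngine(storage, nil) // nil selects DefaultEngineOptions.
//	qry, err := engine.NewInstantQuery(`sum(rate(http_requests_total[5m]))`, model.Now())
//	res := qry.Exec(context.Background())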

// NewInstantQuery returns an evaluation query for the given expression at the given time.
func (ng *Engine) NewInstantQuery(qs string, ts model.Time) (Query, error) {
	expr, err := ParseExpr(qs)
	if err != nil {
		return nil, err
	}
	qry := ng.newQuery(expr, ts, ts, 0)
	qry.q = qs

	return qry, nil
}

// NewRangeQuery returns an evaluation query for the given time range and with
// the resolution set by the interval.
func (ng *Engine) NewRangeQuery(qs string, start, end model.Time, interval time.Duration) (Query, error) {
	expr, err := ParseExpr(qs)
	if err != nil {
		return nil, err
	}
	if expr.Type() != model.ValVector && expr.Type() != model.ValScalar {
		return nil, fmt.Errorf("invalid expression type %q for range query, must be scalar or instant vector", documentedType(expr.Type()))
	}
	qry := ng.newQuery(expr, start, end, interval)
	qry.q = qs

	return qry, nil
}
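
// For example (illustrative; "ng" is an *Engine as above), a query evaluated
// every 15 seconds over the last hour could be constructed as:
//
//	end := model.Now()
//	qry, err := ng.NewRangeQuery(`up`, end.Add(-1*time.Hour), end, 15*time.Second)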

func (ng *Engine) newQuery(expr Expr, start, end model.Time, interval time.Duration) *query {
	es := &EvalStmt{
		Expr:     expr,
		Start:    start,
		End:      end,
		Interval: interval,
	}
	qry := &query{
		stmt:  es,
		ng:    ng,
		stats: stats.NewTimerGroup(),
	}
	return qry
}

// testStmt is an internal helper statement that allows execution
// of an arbitrary function during handling. It is used to test the Engine.
type testStmt func(context.Context) error

func (testStmt) String() string { return "test statement" }
func (testStmt) stmt()          {}

func (ng *Engine) newTestQuery(f func(context.Context) error) Query {
	qry := &query{
		q:     "test statement",
		stmt:  testStmt(f),
		ng:    ng,
		stats: stats.NewTimerGroup(),
	}
	return qry
}

// exec executes the query.
//
// At this point per query only one EvalStmt is evaluated. Alert and record
// statements are not handled by the Engine.
func (ng *Engine) exec(ctx context.Context, q *query) (model.Value, error) {
	currentQueries.Inc()
	defer currentQueries.Dec()

	ctx, cancel := context.WithTimeout(ctx, ng.options.Timeout)
	q.cancel = cancel

	queueTimer := q.stats.GetTimer(stats.ExecQueueTime).Start()

	if err := ng.gate.Start(ctx); err != nil {
		return nil, err
	}
	defer ng.gate.Done()

	queueTimer.Stop()

	// Cancel when execution is done or an error was raised.
	defer q.cancel()

	const env = "query execution"

	evalTimer := q.stats.GetTimer(stats.TotalEvalTime).Start()
	defer evalTimer.Stop()

	// The base context might already be canceled on the first iteration (e.g. during shutdown).
	if err := contextDone(ctx, env); err != nil {
		return nil, err
	}

	switch s := q.Statement().(type) {
	case *EvalStmt:
		return ng.execEvalStmt(ctx, q, s)
	case testStmt:
		return nil, s(ctx)
	}

	panic(fmt.Errorf("promql.Engine.exec: unhandled statement of type %T", q.Statement()))
}

// execEvalStmt evaluates the expression of an evaluation statement for the given time range.
func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *EvalStmt) (model.Value, error) {
	querier, err := ng.queryable.Querier()
	if err != nil {
		return nil, err
	}
	defer querier.Close()

	prepareTimer := query.stats.GetTimer(stats.QueryPreparationTime).Start()
	err = ng.populateIterators(ctx, querier, s)
	prepareTimer.Stop()
	queryPrepareTime.Observe(prepareTimer.ElapsedTime().Seconds())

	if err != nil {
		return nil, err
	}
	defer ng.closeIterators(s)

	evalTimer := query.stats.GetTimer(stats.InnerEvalTime).Start()
	// Instant evaluation.
	if s.Start == s.End && s.Interval == 0 {
		evaluator := &evaluator{
			Timestamp: s.Start,
			ctx:       ctx,
		}
		val, err := evaluator.Eval(s.Expr)
		if err != nil {
			return nil, err
		}

		// Turn matrix and vector types with protected metrics into
		// model.* types.
		switch v := val.(type) {
		case vector:
			val = v.value()
		case matrix:
			val = v.value()
		}

		evalTimer.Stop()
		queryInnerEval.Observe(evalTimer.ElapsedTime().Seconds())

		return val, nil
	}
	numSteps := int(s.End.Sub(s.Start) / s.Interval)

	// Range evaluation.
	sampleStreams := map[model.Fingerprint]*sampleStream{}
	for ts := s.Start; !ts.After(s.End); ts = ts.Add(s.Interval) {

		if err := contextDone(ctx, "range evaluation"); err != nil {
			return nil, err
		}

		evaluator := &evaluator{
			Timestamp: ts,
			ctx:       ctx,
		}
		val, err := evaluator.Eval(s.Expr)
		if err != nil {
			return nil, err
		}

		switch v := val.(type) {
		case *model.Scalar:
			// As the expression type does not change we can safely default to 0
			// as the fingerprint for scalar expressions.
			ss := sampleStreams[0]
			if ss == nil {
				ss = &sampleStream{Values: make([]model.SamplePair, 0, numSteps)}
				sampleStreams[0] = ss
			}
			ss.Values = append(ss.Values, model.SamplePair{
				Value:     v.Value,
				Timestamp: v.Timestamp,
			})
		case vector:
			for _, sample := range v {
				fp := sample.Metric.Metric.Fingerprint()
				ss := sampleStreams[fp]
				if ss == nil {
					ss = &sampleStream{
						Metric: sample.Metric,
						Values: make([]model.SamplePair, 0, numSteps),
					}
					sampleStreams[fp] = ss
				}
				ss.Values = append(ss.Values, model.SamplePair{
					Value:     sample.Value,
					Timestamp: sample.Timestamp,
				})
			}
		default:
			panic(fmt.Errorf("promql.Engine.exec: invalid expression type %q", val.Type()))
		}
	}
	evalTimer.Stop()
	queryInnerEval.Observe(evalTimer.ElapsedTime().Seconds())

	if err := contextDone(ctx, "expression evaluation"); err != nil {
		return nil, err
	}

	appendTimer := query.stats.GetTimer(stats.ResultAppendTime).Start()
	mat := matrix{}
	for _, ss := range sampleStreams {
		mat = append(mat, ss)
	}
	appendTimer.Stop()
	queryResultAppend.Observe(appendTimer.ElapsedTime().Seconds())

	if err := contextDone(ctx, "expression evaluation"); err != nil {
		return nil, err
	}

	// Turn matrix type with protected metric into model.Matrix.
	resMatrix := mat.value()

	sortTimer := query.stats.GetTimer(stats.ResultSortTime).Start()
	sort.Sort(resMatrix)
	sortTimer.Stop()
	queryResultSort.Observe(sortTimer.ElapsedTime().Seconds())
	return resMatrix, nil
}

func (ng *Engine) populateIterators(ctx context.Context, querier local.Querier, s *EvalStmt) error {
	var queryErr error
	Inspect(s.Expr, func(node Node) bool {
		switch n := node.(type) {
		case *VectorSelector:
			if s.Start.Equal(s.End) {
				n.iterators, queryErr = querier.QueryInstant(
					ctx,
					s.Start.Add(-n.Offset),
					StalenessDelta,
					n.LabelMatchers...,
				)
			} else {
				n.iterators, queryErr = querier.QueryRange(
					ctx,
					s.Start.Add(-n.Offset-StalenessDelta),
					s.End.Add(-n.Offset),
					n.LabelMatchers...,
				)
			}
			if queryErr != nil {
				return false
			}
		case *MatrixSelector:
			n.iterators, queryErr = querier.QueryRange(
				ctx,
				s.Start.Add(-n.Offset-n.Range),
				s.End.Add(-n.Offset),
				n.LabelMatchers...,
			)
			if queryErr != nil {
				return false
			}
		}
		return true
	})
	return queryErr
}

func (ng *Engine) closeIterators(s *EvalStmt) {
	Inspect(s.Expr, func(node Node) bool {
		switch n := node.(type) {
		case *VectorSelector:
			for _, it := range n.iterators {
				it.Close()
			}
		case *MatrixSelector:
			for _, it := range n.iterators {
				it.Close()
			}
		}
		return true
	})
}

// An evaluator evaluates given expressions at a fixed timestamp. It is attached to an
// engine through which it connects to a querier and reports errors. On timeout or
// cancellation of its context it terminates.
type evaluator struct {
	ctx context.Context

	Timestamp model.Time
}

// errorf causes a panic with the input formatted into an error.
func (ev *evaluator) errorf(format string, args ...interface{}) {
	ev.error(fmt.Errorf(format, args...))
}

// error causes a panic with the given error.
func (ev *evaluator) error(err error) {
	panic(err)
}

// recover is the handler that turns panics into returns from the top level of evaluation.
func (ev *evaluator) recover(errp *error) {
	e := recover()
	if e != nil {
		if _, ok := e.(runtime.Error); ok {
			// Print the stack trace but do not inhibit the running application.
			buf := make([]byte, 64<<10)
			buf = buf[:runtime.Stack(buf, false)]

			log.Errorf("parser panic: %v\n%s", e, buf)
			*errp = fmt.Errorf("unexpected error")
		} else {
			*errp = e.(error)
		}
	}
}

// evalScalar attempts to evaluate e to a scalar value and errors otherwise.
func (ev *evaluator) evalScalar(e Expr) *model.Scalar {
	val := ev.eval(e)
	sv, ok := val.(*model.Scalar)
	if !ok {
		ev.errorf("expected scalar but got %s", documentedType(val.Type()))
	}
	return sv
}

// evalVector attempts to evaluate e to a vector value and errors otherwise.
func (ev *evaluator) evalVector(e Expr) vector {
	val := ev.eval(e)
	vec, ok := val.(vector)
	if !ok {
		ev.errorf("expected instant vector but got %s", documentedType(val.Type()))
	}
	return vec
}

// evalInt attempts to evaluate e into an integer and errors otherwise.
func (ev *evaluator) evalInt(e Expr) int64 {
	sc := ev.evalScalar(e)
	if !convertibleToInt64(sc.Value) {
		ev.errorf("scalar value %v overflows int64", sc.Value)
	}
	return int64(sc.Value)
}

// evalFloat attempts to evaluate e into a float and errors otherwise.
func (ev *evaluator) evalFloat(e Expr) float64 {
	sc := ev.evalScalar(e)
	return float64(sc.Value)
}

// evalMatrix attempts to evaluate e into a matrix and errors otherwise.
// The error message uses the term "range vector" to match the user facing
// documentation.
func (ev *evaluator) evalMatrix(e Expr) matrix {
	val := ev.eval(e)
	mat, ok := val.(matrix)
	if !ok {
		ev.errorf("expected range vector but got %s", documentedType(val.Type()))
	}
	return mat
}

// evalString attempts to evaluate e to a string value and errors otherwise.
func (ev *evaluator) evalString(e Expr) *model.String {
	val := ev.eval(e)
	sv, ok := val.(*model.String)
	if !ok {
		ev.errorf("expected string but got %s", documentedType(val.Type()))
	}
	return sv
}

// evalOneOf evaluates e and errors unless the result is of one of the given types.
func (ev *evaluator) evalOneOf(e Expr, t1, t2 model.ValueType) model.Value {
	val := ev.eval(e)
	if val.Type() != t1 && val.Type() != t2 {
		ev.errorf("expected %s or %s but got %s", documentedType(t1), documentedType(t2), documentedType(val.Type()))
	}
	return val
}

func (ev *evaluator) Eval(expr Expr) (v model.Value, err error) {
	defer ev.recover(&err)
	return ev.eval(expr), nil
}
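
// Note that eval and its helpers report failures by panicking via errorf/error;
// the deferred recover above is what converts those panics back into the error
// returned from Eval.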

// eval evaluates the given expression as the given AST expression node requires.
func (ev *evaluator) eval(expr Expr) model.Value {
	// This is the top-level evaluation method.
	// Thus, we check for timeout/cancellation here.
	if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
		ev.error(err)
	}

	switch e := expr.(type) {
	case *AggregateExpr:
		vector := ev.evalVector(e.Expr)
		return ev.aggregation(e.Op, e.Grouping, e.Without, e.KeepCommonLabels, e.Param, vector)

	case *BinaryExpr:
		lhs := ev.evalOneOf(e.LHS, model.ValScalar, model.ValVector)
		rhs := ev.evalOneOf(e.RHS, model.ValScalar, model.ValVector)

		switch lt, rt := lhs.Type(), rhs.Type(); {
		case lt == model.ValScalar && rt == model.ValScalar:
			return &model.Scalar{
				Value:     scalarBinop(e.Op, lhs.(*model.Scalar).Value, rhs.(*model.Scalar).Value),
				Timestamp: ev.Timestamp,
			}

		case lt == model.ValVector && rt == model.ValVector:
			switch e.Op {
			case itemLAND:
				return ev.vectorAnd(lhs.(vector), rhs.(vector), e.VectorMatching)
			case itemLOR:
				return ev.vectorOr(lhs.(vector), rhs.(vector), e.VectorMatching)
			case itemLUnless:
				return ev.vectorUnless(lhs.(vector), rhs.(vector), e.VectorMatching)
			default:
				return ev.vectorBinop(e.Op, lhs.(vector), rhs.(vector), e.VectorMatching, e.ReturnBool)
			}

		case lt == model.ValVector && rt == model.ValScalar:
			return ev.vectorScalarBinop(e.Op, lhs.(vector), rhs.(*model.Scalar), false, e.ReturnBool)

		case lt == model.ValScalar && rt == model.ValVector:
			return ev.vectorScalarBinop(e.Op, rhs.(vector), lhs.(*model.Scalar), true, e.ReturnBool)
		}

	case *Call:
		return e.Func.Call(ev, e.Args)

	case *MatrixSelector:
		return ev.matrixSelector(e)

	case *NumberLiteral:
		return &model.Scalar{Value: e.Val, Timestamp: ev.Timestamp}

	case *ParenExpr:
		return ev.eval(e.Expr)

	case *StringLiteral:
		return &model.String{Value: e.Val, Timestamp: ev.Timestamp}

	case *UnaryExpr:
		se := ev.evalOneOf(e.Expr, model.ValScalar, model.ValVector)
		// Only + and - are possible operators.
		if e.Op == itemSUB {
			switch v := se.(type) {
			case *model.Scalar:
				v.Value = -v.Value
			case vector:
				for i, sv := range v {
					v[i].Value = -sv.Value
				}
			}
		}
		return se

	case *VectorSelector:
		return ev.vectorSelector(e)
	}
	panic(fmt.Errorf("unhandled expression of type: %T", expr))
}

// vectorSelector evaluates a *VectorSelector expression.
func (ev *evaluator) vectorSelector(node *VectorSelector) vector {
	vec := vector{}
	for _, it := range node.iterators {
		refTime := ev.Timestamp.Add(-node.Offset)
		samplePair := it.ValueAtOrBeforeTime(refTime)
		if samplePair.Timestamp.Before(refTime.Add(-StalenessDelta)) {
			continue // Sample outside of staleness policy window.
		}
		vec = append(vec, &sample{
			Metric:    it.Metric(),
			Value:     samplePair.Value,
			Timestamp: ev.Timestamp,
		})
	}
	return vec
}

// matrixSelector evaluates a *MatrixSelector expression.
func (ev *evaluator) matrixSelector(node *MatrixSelector) matrix {
	interval := metric.Interval{
		OldestInclusive: ev.Timestamp.Add(-node.Range - node.Offset),
		NewestInclusive: ev.Timestamp.Add(-node.Offset),
	}

	sampleStreams := make([]*sampleStream, 0, len(node.iterators))
	for _, it := range node.iterators {
		samplePairs := it.RangeValues(interval)
		if len(samplePairs) == 0 {
			continue
		}

		if node.Offset != 0 {
			// Adjust the timestamps in place; ranging over the slice by value
			// would only modify a copy of each pair.
			for i := range samplePairs {
				samplePairs[i].Timestamp = samplePairs[i].Timestamp.Add(node.Offset)
			}
		}

		sampleStream := &sampleStream{
			Metric: it.Metric(),
			Values: samplePairs,
		}
		sampleStreams = append(sampleStreams, sampleStream)
	}
	return matrix(sampleStreams)
}
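
// As an illustration, evaluating the selector `http_requests_total[5m]` at
// timestamp T collects, per series, the sample pairs whose timestamps lie in
// the closed interval [T-offset-5m, T-offset], as expressed by the
// OldestInclusive/NewestInclusive bounds above.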

func (ev *evaluator) vectorAnd(lhs, rhs vector, matching *VectorMatching) vector {
	if matching.Card != CardManyToMany {
		panic("set operations must only use many-to-many matching")
	}
	sigf := signatureFunc(matching.On, matching.MatchingLabels...)

	var result vector
	// The set of signatures for the right-hand side vector.
	rightSigs := map[uint64]struct{}{}
	// Add all rhs samples to a map so we can easily find matches later.
	for _, rs := range rhs {
		rightSigs[sigf(rs.Metric)] = struct{}{}
	}

	for _, ls := range lhs {
		// If there's a matching entry in the right-hand side vector, add the sample.
		if _, ok := rightSigs[sigf(ls.Metric)]; ok {
			result = append(result, ls)
		}
	}
	return result
}
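
// For example (illustrative, using default matching on all labels): with
// lhs = {a{job="x"}, a{job="y"}} and rhs = {b{job="x"}}, vectorAnd keeps only
// a{job="x"}, since the metric name is not part of the matching signature.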

func (ev *evaluator) vectorOr(lhs, rhs vector, matching *VectorMatching) vector {
	if matching.Card != CardManyToMany {
		panic("set operations must only use many-to-many matching")
	}
	sigf := signatureFunc(matching.On, matching.MatchingLabels...)

	var result vector
	leftSigs := map[uint64]struct{}{}
	// Add everything from the left-hand-side vector.
	for _, ls := range lhs {
		leftSigs[sigf(ls.Metric)] = struct{}{}
		result = append(result, ls)
	}
	// Add all right-hand side elements which have not been added from the left-hand side.
	for _, rs := range rhs {
		if _, ok := leftSigs[sigf(rs.Metric)]; !ok {
			result = append(result, rs)
		}
	}
	return result
}
|
|
|
|
|
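// Illustrative example: `a or b` (both sides hypothetical instant vectors)
// keeps every sample of `a` and adds those samples of `b` whose label
// signature did not already occur on the left-hand side.
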
func (ev *evaluator) vectorUnless(lhs, rhs vector, matching *VectorMatching) vector {
	if matching.Card != CardManyToMany {
		panic("set operations must only use many-to-many matching")
	}
	sigf := signatureFunc(matching.On, matching.MatchingLabels...)

	rightSigs := map[uint64]struct{}{}
	for _, rs := range rhs {
		rightSigs[sigf(rs.Metric)] = struct{}{}
	}

	var result vector
	for _, ls := range lhs {
		if _, ok := rightSigs[sigf(ls.Metric)]; !ok {
			result = append(result, ls)
		}
	}
	return result
}

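// Illustrative example: `a unless b` keeps the samples of `a` for which no
// sample with a matching label signature exists in `b`.
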
// vectorBinop evaluates a binary operation between two vectors, excluding set operators.
func (ev *evaluator) vectorBinop(op itemType, lhs, rhs vector, matching *VectorMatching, returnBool bool) vector {
	if matching.Card == CardManyToMany {
		panic("many-to-many only allowed for set operators")
	}
	var (
		result = vector{}
		sigf   = signatureFunc(matching.On, matching.MatchingLabels...)
	)

	// The control flow below handles one-to-one or many-to-one matching.
	// For one-to-many, swap sidedness and account for the swap when calculating
	// values.
	if matching.Card == CardOneToMany {
		lhs, rhs = rhs, lhs
	}

	// All samples from the rhs hashed by the matching label/values.
	rightSigs := map[uint64]*sample{}

	// Add all rhs samples to a map so we can easily find matches later.
	for _, rs := range rhs {
		sig := sigf(rs.Metric)
		// The rhs is guaranteed to be the 'one' side. Having multiple samples
		// with the same signature means that the matching is many-to-many.
		if _, found := rightSigs[sig]; found {
			// Many-to-many matching not allowed.
			ev.errorf("many-to-many matching not allowed: matching labels must be unique on one side")
		}
		rightSigs[sig] = rs
	}

	// Tracks the match-signature. For one-to-one operations the value is nil. For many-to-one
	// the value is a set of signatures to detect duplicated result elements.
	matchedSigs := map[uint64]map[uint64]struct{}{}

	// For all lhs samples find a respective rhs sample and perform
	// the binary operation.
	for _, ls := range lhs {
		sig := sigf(ls.Metric)

		rs, found := rightSigs[sig] // Look for a match in the rhs vector.
		if !found {
			continue
		}

		// Account for potentially swapped sidedness.
		vl, vr := ls.Value, rs.Value
		if matching.Card == CardOneToMany {
			vl, vr = vr, vl
		}
		value, keep := vectorElemBinop(op, vl, vr)
		if returnBool {
			if keep {
				value = 1.0
			} else {
				value = 0.0
			}
		} else if !keep {
			continue
		}
		metric := resultMetric(ls.Metric, rs.Metric, op, matching)

		insertedSigs, exists := matchedSigs[sig]
		if matching.Card == CardOneToOne {
			if exists {
				ev.errorf("multiple matches for labels: many-to-one matching must be explicit (group_left/group_right)")
			}
			matchedSigs[sig] = nil // Set existence to true.
		} else {
			// In many-to-one matching the grouping labels have to ensure a unique metric
			// for the result vector. Check whether those labels have already been added for
			// the same matching labels.
			insertSig := uint64(metric.Metric.Fingerprint())
			if !exists {
				insertedSigs = map[uint64]struct{}{}
				matchedSigs[sig] = insertedSigs
			} else if _, duplicate := insertedSigs[insertSig]; duplicate {
				ev.errorf("multiple matches for labels: grouping labels must ensure unique matches")
			}
			insertedSigs[insertSig] = struct{}{}
		}

		result = append(result, &sample{
			Metric:    metric,
			Value:     value,
			Timestamp: ev.Timestamp,
		})
	}
	return result
}

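// Illustrative example (series names are hypothetical): in
//
//	errors:rate5m / ignoring(code) group_left requests:rate5m
//
// the right-hand side is the "one" side, so each rhs signature must be unique.
// group_left copies no extra labels here, but it allows several lhs samples
// (one per `code`) to match the same rhs sample; with plain one-to-one
// matching the same situation aborts with the "multiple matches" error above.
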
// signatureFunc returns a function that calculates the signature for a metric,
// ignoring the provided labels. If on is true, the signature is instead
// computed from the given labels only.
func signatureFunc(on bool, labels ...model.LabelName) func(m metric.Metric) uint64 {
	if !on {
		return func(m metric.Metric) uint64 {
			tmp := m.Metric.Clone()
			for _, l := range labels {
				delete(tmp, l)
			}
			delete(tmp, model.MetricNameLabel)
			return uint64(tmp.Fingerprint())
		}
	}
	return func(m metric.Metric) uint64 {
		return model.SignatureForLabels(m.Metric, labels...)
	}
}

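// For example, with on(job, instance) the signature is computed from just
// those two labels, while with ignoring(code) (the !on case above) it is
// computed from all labels except `code` and the metric name.
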
// resultMetric returns the metric for the given sample(s) based on the vector
// binary operation and the matching options.
func resultMetric(lhs, rhs metric.Metric, op itemType, matching *VectorMatching) metric.Metric {
	if shouldDropMetricName(op) {
		lhs.Del(model.MetricNameLabel)
	}
	if !matching.On {
		if matching.Card == CardOneToOne {
			for _, l := range matching.MatchingLabels {
				lhs.Del(l)
			}
		}
		for _, ln := range matching.Include {
			// Included labels from the `group_x` modifier are taken from the "one"-side.
			value := rhs.Metric[ln]
			if value != "" {
				lhs.Set(ln, value)
			} else {
				lhs.Del(ln)
			}
		}
		return lhs
	}
	// Since we definitely write to the metric, creating a new one is the easiest solution.
	m := model.Metric{}
	if matching.Card == CardOneToOne {
		for _, ln := range matching.MatchingLabels {
			if v, ok := lhs.Metric[ln]; ok {
				m[ln] = v
			}
		}
	} else {
		for k, v := range lhs.Metric {
			m[k] = v
		}
	}
	for _, ln := range matching.Include {
		// Included labels from the `group_x` modifier are taken from the "one"-side.
		if v, ok := rhs.Metric[ln]; ok {
			m[ln] = v
		} else {
			delete(m, ln)
		}
	}
	return metric.Metric{Metric: m, Copied: false}
}

// vectorScalarBinop evaluates a binary operation between a vector and a scalar.
func (ev *evaluator) vectorScalarBinop(op itemType, lhs vector, rhs *model.Scalar, swap, returnBool bool) vector {
	vec := make(vector, 0, len(lhs))

	for _, lhsSample := range lhs {
		lv, rv := lhsSample.Value, rhs.Value
		// lhs always contains the vector. If the original position was different,
		// swap the operands for calculating the value.
		if swap {
			lv, rv = rv, lv
		}
		value, keep := vectorElemBinop(op, lv, rv)
		if returnBool {
			if keep {
				value = 1.0
			} else {
				value = 0.0
			}
			keep = true
		}
		if keep {
			lhsSample.Value = value
			if shouldDropMetricName(op) {
				lhsSample.Metric.Del(model.MetricNameLabel)
			}
			vec = append(vec, lhsSample)
		}
	}
	return vec
}

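// Illustrative example (metric name hypothetical): `http_requests_total > 100`
// filters the vector, dropping samples whose value is not greater than 100,
// whereas `http_requests_total > bool 100` keeps every sample and replaces its
// value with 1 or 0 according to the comparison (the keep = true case above).
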
// scalarBinop evaluates a binary operation between two scalars.
func scalarBinop(op itemType, lhs, rhs model.SampleValue) model.SampleValue {
	switch op {
	case itemADD:
		return lhs + rhs
	case itemSUB:
		return lhs - rhs
	case itemMUL:
		return lhs * rhs
	case itemDIV:
		return lhs / rhs
	case itemPOW:
		return model.SampleValue(math.Pow(float64(lhs), float64(rhs)))
	case itemMOD:
		return model.SampleValue(math.Mod(float64(lhs), float64(rhs)))
	case itemEQL:
		return btos(lhs == rhs)
	case itemNEQ:
		return btos(lhs != rhs)
	case itemGTR:
		return btos(lhs > rhs)
	case itemLSS:
		return btos(lhs < rhs)
	case itemGTE:
		return btos(lhs >= rhs)
	case itemLTE:
		return btos(lhs <= rhs)
	}
	panic(fmt.Errorf("operator %q not allowed for scalar operations", op))
}

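// Unlike the vector form, a scalar comparison cannot filter anything, so it
// always yields a value: for example itemGTR with lhs=2, rhs=1 returns
// btos(true) == 1, and 0 otherwise.
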
// vectorElemBinop evaluates a binary operation between two vector elements.
func vectorElemBinop(op itemType, lhs, rhs model.SampleValue) (model.SampleValue, bool) {
	switch op {
	case itemADD:
		return lhs + rhs, true
	case itemSUB:
		return lhs - rhs, true
	case itemMUL:
		return lhs * rhs, true
	case itemDIV:
		return lhs / rhs, true
	case itemPOW:
		return model.SampleValue(math.Pow(float64(lhs), float64(rhs))), true
	case itemMOD:
		return model.SampleValue(math.Mod(float64(lhs), float64(rhs))), true
	case itemEQL:
		return lhs, lhs == rhs
	case itemNEQ:
		return lhs, lhs != rhs
	case itemGTR:
		return lhs, lhs > rhs
	case itemLSS:
		return lhs, lhs < rhs
	case itemGTE:
		return lhs, lhs >= rhs
	case itemLTE:
		return lhs, lhs <= rhs
	}
	panic(fmt.Errorf("operator %q not allowed for operations between vectors", op))
}

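// For element-wise comparisons the left-hand value is returned together with a
// keep flag: e.g. itemGTR with lhs=3, rhs=5 yields (3, false), so the calling
// code drops the sample unless returnBool (the `bool` modifier) is set.
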
// labelIntersection returns the metric of common label/value pairs of two input metrics.
func labelIntersection(metric1, metric2 metric.Metric) metric.Metric {
	for label, value := range metric1.Metric {
		if metric2.Metric[label] != value {
			metric1.Del(label)
		}
	}
	return metric1
}

type groupedAggregation struct {
	labels           metric.Metric
	value            model.SampleValue
	valuesSquaredSum model.SampleValue
	groupCount       int
	heap             vectorByValueHeap
	reverseHeap      vectorByReverseValueHeap
}

// aggregation evaluates an aggregation operation on a vector.
func (ev *evaluator) aggregation(op itemType, grouping model.LabelNames, without bool, keepCommon bool, param Expr, vec vector) vector {

	result := map[uint64]*groupedAggregation{}
	var k int64
	if op == itemTopK || op == itemBottomK {
		k = ev.evalInt(param)
		if k < 1 {
			return vector{}
		}
	}
	var q float64
	if op == itemQuantile {
		q = ev.evalFloat(param)
	}
	var valueLabel model.LabelName
	if op == itemCountValues {
		valueLabel = model.LabelName(ev.evalString(param).Value)
		if !without {
			grouping = append(grouping, valueLabel)
		}
	}

	for _, s := range vec {
		withoutMetric := s.Metric
		if without {
			for _, l := range grouping {
				withoutMetric.Del(l)
			}
			withoutMetric.Del(model.MetricNameLabel)
			if op == itemCountValues {
				withoutMetric.Set(valueLabel, model.LabelValue(s.Value.String()))
			}
		} else {
			if op == itemCountValues {
				s.Metric.Set(valueLabel, model.LabelValue(s.Value.String()))
			}
		}

		var groupingKey uint64
		if without {
			groupingKey = uint64(withoutMetric.Metric.Fingerprint())
		} else {
			groupingKey = model.SignatureForLabels(s.Metric.Metric, grouping...)
		}

		groupedResult, ok := result[groupingKey]
		// Add a new group if it doesn't exist.
		if !ok {
			var m metric.Metric
			if keepCommon {
				m = s.Metric
				m.Del(model.MetricNameLabel)
			} else if without {
				m = withoutMetric
			} else {
				m = metric.Metric{
					Metric: model.Metric{},
					Copied: true,
				}
				for _, l := range grouping {
					if v, ok := s.Metric.Metric[l]; ok {
						m.Set(l, v)
					}
				}
			}
			result[groupingKey] = &groupedAggregation{
				labels:           m,
				value:            s.Value,
				valuesSquaredSum: s.Value * s.Value,
				groupCount:       1,
			}
			if op == itemTopK || op == itemQuantile {
				result[groupingKey].heap = make(vectorByValueHeap, 0, k)
				heap.Push(&result[groupingKey].heap, &sample{Value: s.Value, Metric: s.Metric})
			} else if op == itemBottomK {
				result[groupingKey].reverseHeap = make(vectorByReverseValueHeap, 0, k)
				heap.Push(&result[groupingKey].reverseHeap, &sample{Value: s.Value, Metric: s.Metric})
			}
			continue
		}
		// Add the sample to the existing group.
		if keepCommon {
			groupedResult.labels = labelIntersection(groupedResult.labels, s.Metric)
		}

		switch op {
		case itemSum:
			groupedResult.value += s.Value
		case itemAvg:
			groupedResult.value += s.Value
			groupedResult.groupCount++
		case itemMax:
			if groupedResult.value < s.Value || math.IsNaN(float64(groupedResult.value)) {
				groupedResult.value = s.Value
			}
		case itemMin:
			if groupedResult.value > s.Value || math.IsNaN(float64(groupedResult.value)) {
				groupedResult.value = s.Value
			}
		case itemCount, itemCountValues:
			groupedResult.groupCount++
		case itemStdvar, itemStddev:
			groupedResult.value += s.Value
			groupedResult.valuesSquaredSum += s.Value * s.Value
			groupedResult.groupCount++
		case itemTopK:
			if int64(len(groupedResult.heap)) < k || groupedResult.heap[0].Value < s.Value || math.IsNaN(float64(groupedResult.heap[0].Value)) {
				if int64(len(groupedResult.heap)) == k {
					heap.Pop(&groupedResult.heap)
				}
				heap.Push(&groupedResult.heap, &sample{Value: s.Value, Metric: s.Metric})
			}
		case itemBottomK:
			if int64(len(groupedResult.reverseHeap)) < k || groupedResult.reverseHeap[0].Value > s.Value || math.IsNaN(float64(groupedResult.reverseHeap[0].Value)) {
				if int64(len(groupedResult.reverseHeap)) == k {
					heap.Pop(&groupedResult.reverseHeap)
				}
				heap.Push(&groupedResult.reverseHeap, &sample{Value: s.Value, Metric: s.Metric})
			}
		case itemQuantile:
			groupedResult.heap = append(groupedResult.heap, s)
		default:
			panic(fmt.Errorf("expected aggregation operator but got %q", op))
		}
	}

	// Construct the result vector from the aggregated groups.
	resultVector := make(vector, 0, len(result))

	for _, aggr := range result {
		switch op {
		case itemAvg:
			aggr.value = aggr.value / model.SampleValue(aggr.groupCount)
		case itemCount, itemCountValues:
			aggr.value = model.SampleValue(aggr.groupCount)
		case itemStdvar:
			avg := float64(aggr.value) / float64(aggr.groupCount)
			aggr.value = model.SampleValue(float64(aggr.valuesSquaredSum)/float64(aggr.groupCount) - avg*avg)
		case itemStddev:
			avg := float64(aggr.value) / float64(aggr.groupCount)
			aggr.value = model.SampleValue(math.Sqrt(float64(aggr.valuesSquaredSum)/float64(aggr.groupCount) - avg*avg))
		case itemTopK:
			// The heap keeps the lowest value on top, so reverse it.
			sort.Sort(sort.Reverse(aggr.heap))
			for _, v := range aggr.heap {
				resultVector = append(resultVector, &sample{
					Metric:    v.Metric,
					Value:     v.Value,
					Timestamp: ev.Timestamp,
				})
			}
			continue // Bypass default append.
		case itemBottomK:
			// The reverse heap keeps the highest value on top, so reverse it.
			sort.Sort(sort.Reverse(aggr.reverseHeap))
			for _, v := range aggr.reverseHeap {
				resultVector = append(resultVector, &sample{
					Metric:    v.Metric,
					Value:     v.Value,
					Timestamp: ev.Timestamp,
				})
			}
			continue // Bypass default append.
		case itemQuantile:
			aggr.value = model.SampleValue(quantile(q, aggr.heap))
		default:
			// For other aggregations, we already have the right value.
		}
		sample := &sample{
			Metric:    aggr.labels,
			Value:     aggr.value,
			Timestamp: ev.Timestamp,
		}
		resultVector = append(resultVector, sample)
	}
	return resultVector
}

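// Worked example of the stddev/stdvar bookkeeping above: each group keeps
// sum(x) in value and sum(x²) in valuesSquaredSum, so the variance is computed
// as E[x²] - E[x]², i.e. valuesSquaredSum/n - (value/n)², and stddev is its
// square root. For topk/bottomk, e.g. `topk(3, some_metric)` (metric name
// hypothetical), each group keeps a k-sized heap with the smallest (resp.
// largest) retained value on top so the weakest element can be evicted cheaply.
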
// btos returns 1 if b is true, 0 otherwise.
func btos(b bool) model.SampleValue {
	if b {
		return 1
	}
	return 0
}

// shouldDropMetricName returns whether the metric name should be dropped in the
// result of the op operation.
func shouldDropMetricName(op itemType) bool {
	switch op {
	case itemADD, itemSUB, itemDIV, itemMUL, itemMOD:
		return true
	default:
		return false
	}
}

// StalenessDelta determines the time since the last sample after which a time
// series is considered stale.
var StalenessDelta = 5 * time.Minute

// A queryGate controls the maximum number of concurrently running and waiting queries.
type queryGate struct {
	ch chan struct{}
}

// newQueryGate returns a query gate that limits the number of queries
// being concurrently executed.
func newQueryGate(length int) *queryGate {
	return &queryGate{
		ch: make(chan struct{}, length),
	}
}

// Start blocks until the gate has a free spot or the context is done.
func (g *queryGate) Start(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return contextDone(ctx, "query queue")
	case g.ch <- struct{}{}:
		return nil
	}
}

// Done releases a single spot in the gate.
func (g *queryGate) Done() {
	select {
	case <-g.ch:
	default:
		panic("engine.queryGate.Done: more operations done than started")
	}
}

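// Typical (sketched) usage, assuming a gate sized to the maximum number of
// concurrent queries:
//
//	gate := newQueryGate(maxConcurrent)
//	if err := gate.Start(ctx); err != nil {
//		return nil, err // context was canceled or timed out while queued
//	}
//	defer gate.Done()
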
// documentedType converts the internal type name to the equivalent
// user-facing terminology as defined in the documentation.
func documentedType(t model.ValueType) string {
	switch t.String() {
	case "vector":
		return "instant vector"
	case "matrix":
		return "range vector"
	default:
		return t.String()
	}
}