*: implement query params

This adds a parameter to the storage selection interface that allows
query engines to pass information about the operations surrounding a
data selection.
This can, for example, be used by remote storage backends to infer the
correct downsampling aggregates that need to be provided.
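
To make the intent concrete: a remote read backend could inspect the SelectParams introduced by this commit and pick a pre-computed downsampling aggregate instead of always serving raw samples. The sketch below is illustrative only; chooseAggregate and the aggregate names are hypothetical, and only the Step and Func fields added here are assumed.

package main

import (
    "fmt"

    "github.com/prometheus/prometheus/storage"
)

// chooseAggregate is a hypothetical helper for a remote read backend: given
// the SelectParams passed to Querier.Select, pick a pre-computed downsampling
// aggregate instead of always returning raw samples.
func chooseAggregate(p *storage.SelectParams) string {
    if p == nil || p.Step == 0 {
        // Instant queries (Step 0) or missing hints: serve raw samples.
        return "raw"
    }
    switch p.Func {
    case "min", "min_over_time":
        return "min"
    case "max", "max_over_time":
        return "max"
    case "count", "count_over_time":
        return "count"
    case "sum", "sum_over_time":
        return "sum"
    default:
        // Unknown surrounding function or aggregation: stay on the safe side.
        return "raw"
    }
}

func main() {
    p := &storage.SelectParams{Step: 60000, Func: "max_over_time"}
    fmt.Println(chooseAggregate(p)) // prints "max"
}
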
Fabian Reinartz 2018-01-09 17:44:23 +01:00
parent 4801573b64
commit 7ccd4b39b8
18 changed files with 156 additions and 138 deletions


@@ -88,12 +88,12 @@ func main() {
         localStoragePath string
         notifier         notifier.Options
         notifierTimeout  model.Duration
-        queryEngine      promql.EngineOptions
         web              web.Options
         tsdb             tsdb.Options
         lookbackDelta    model.Duration
         webTimeout       model.Duration
         queryTimeout     model.Duration
+        queryConcurrency int

         prometheusURL string
@@ -102,9 +102,6 @@ func main() {
         notifier: notifier.Options{
             Registerer: prometheus.DefaultRegisterer,
         },
-        queryEngine: promql.EngineOptions{
-            Metrics: prometheus.DefaultRegisterer,
-        },
     }

     a := kingpin.New(filepath.Base(os.Args[0]), "The Prometheus monitoring server")
@@ -178,7 +175,7 @@ func main() {
         Default("2m").SetValue(&cfg.queryTimeout)

     a.Flag("query.max-concurrency", "Maximum number of queries executed concurrently.").
-        Default("20").IntVar(&cfg.queryEngine.MaxConcurrentQueries)
+        Default("20").IntVar(&cfg.queryConcurrency)

     promlogflag.AddFlags(a, &cfg.logLevel)
@@ -209,8 +206,6 @@ func main() {
     promql.LookbackDelta = time.Duration(cfg.lookbackDelta)

-    cfg.queryEngine.Timeout = time.Duration(cfg.queryTimeout)
-
     logger := promlog.New(cfg.logLevel)

     // XXX(fabxc): Kubernetes does background logging which we can only customize by modifying
@@ -233,7 +228,6 @@ func main() {
         fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage)
     )

-    cfg.queryEngine.Logger = log.With(logger, "component", "query engine")
     var (
         ctxWeb, cancelWeb = context.WithCancel(context.Background())
         ctxRule           = context.Background()
@@ -247,10 +241,17 @@ func main() {
         discoveryManagerNotify = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"))
         scrapeManager          = scrape.NewManager(log.With(logger, "component", "scrape manager"), fanoutStorage)

-        queryEngine = promql.NewEngine(fanoutStorage, &cfg.queryEngine)
+        queryEngine = promql.NewEngine(
+            log.With(logger, "component", "query engine"),
+            prometheus.DefaultRegisterer,
+            cfg.queryConcurrency,
+            time.Duration(cfg.queryTimeout),
+        )
+
         ruleManager = rules.NewManager(&rules.ManagerOptions{
             Appendable:  fanoutStorage,
-            QueryFunc:   rules.EngineQueryFunc(queryEngine),
+            QueryFunc:   rules.EngineQueryFunc(queryEngine, fanoutStorage),
             NotifyFunc:  sendAlerts(notifier, cfg.web.ExternalURL.String()),
             Context:     ctxRule,
             ExternalURL: cfg.web.ExternalURL,


@@ -238,56 +238,58 @@ type VectorMatching struct {
 }

 // Visitor allows visiting a Node and its child nodes. The Visit method is
-// invoked for each node encountered by Walk. If the result visitor w is not
-// nil, Walk visits each of the children of node with the visitor w, followed
-// by a call of w.Visit(nil).
+// invoked for each node with the path leading to the node provided additionally.
+// If the result visitor w is not nil, Walk visits each of the children
+// of node with the visitor w, followed by a call of w.Visit(nil, nil).
 type Visitor interface {
-    Visit(node Node) (w Visitor)
+    Visit(node Node, path []Node) (w Visitor)
 }

 // Walk traverses an AST in depth-first order: It starts by calling
-// v.Visit(node); node must not be nil. If the visitor w returned by
-// v.Visit(node) is not nil, Walk is invoked recursively with visitor
+// v.Visit(node, path); node must not be nil. If the visitor w returned by
+// v.Visit(node, path) is not nil, Walk is invoked recursively with visitor
 // w for each of the non-nil children of node, followed by a call of
 // w.Visit(nil).
-func Walk(v Visitor, node Node) {
-    if v = v.Visit(node); v == nil {
+// As the tree is descended the path of previous nodes is provided.
+func Walk(v Visitor, node Node, path []Node) {
+    if v = v.Visit(node, path); v == nil {
         return
     }
+    path = append(path, node)

     switch n := node.(type) {
     case Statements:
         for _, s := range n {
-            Walk(v, s)
+            Walk(v, s, path)
         }
     case *AlertStmt:
-        Walk(v, n.Expr)
+        Walk(v, n.Expr, path)
     case *EvalStmt:
-        Walk(v, n.Expr)
+        Walk(v, n.Expr, path)
     case *RecordStmt:
-        Walk(v, n.Expr)
+        Walk(v, n.Expr, path)
     case Expressions:
         for _, e := range n {
-            Walk(v, e)
+            Walk(v, e, path)
         }
     case *AggregateExpr:
-        Walk(v, n.Expr)
+        Walk(v, n.Expr, path)
     case *BinaryExpr:
-        Walk(v, n.LHS)
-        Walk(v, n.RHS)
+        Walk(v, n.LHS, path)
+        Walk(v, n.RHS, path)
     case *Call:
-        Walk(v, n.Args)
+        Walk(v, n.Args, path)
     case *ParenExpr:
-        Walk(v, n.Expr)
+        Walk(v, n.Expr, path)
     case *UnaryExpr:
-        Walk(v, n.Expr)
+        Walk(v, n.Expr, path)
     case *MatrixSelector, *NumberLiteral, *StringLiteral, *VectorSelector:
         // nothing to do
@@ -296,21 +298,21 @@ func Walk(v Visitor, node Node) {
         panic(fmt.Errorf("promql.Walk: unhandled node type %T", node))
     }

-    v.Visit(nil)
+    v.Visit(nil, nil)
 }

-type inspector func(Node) bool
+type inspector func(Node, []Node) bool

-func (f inspector) Visit(node Node) Visitor {
-    if f(node) {
+func (f inspector) Visit(node Node, path []Node) Visitor {
+    if f(node, path) {
         return f
     }
     return nil
 }

 // Inspect traverses an AST in depth-first order: It starts by calling
-// f(node); node must not be nil. If f returns true, Inspect invokes f
+// f(node, path); node must not be nil. If f returns true, Inspect invokes f
 // for all the non-nil children of node, recursively.
-func Inspect(node Node, f func(Node) bool) {
-    Walk(inspector(f), node)
+func Inspect(node Node, f func(Node, []Node) bool) {
+    Walk(inspector(f), node, nil)
 }
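
A small usage sketch of the new visitor signature (not part of this commit): the path argument lets a caller recover the function or aggregation enclosing a selector, mirroring what the engine's new extractFuncFromPath helper does later in this commit. The example expression and the enclosing helper are illustrative only.

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/prometheus/promql"
)

func main() {
    expr, err := promql.ParseExpr(`sum(rate(http_requests_total[5m]))`)
    if err != nil {
        log.Fatal(err)
    }

    // Walk the AST; for every selector, report the surrounding call or
    // aggregation recovered from the path.
    promql.Inspect(expr, func(node promql.Node, path []promql.Node) bool {
        switch n := node.(type) {
        case *promql.VectorSelector:
            fmt.Printf("vector selector %q inside %q\n", n.Name, enclosing(path))
        case *promql.MatrixSelector:
            fmt.Printf("matrix selector %q inside %q\n", n.Name, enclosing(path))
        }
        return true
    })
}

// enclosing scans the path from the innermost ancestor outwards and returns
// the first function or aggregation it finds, or "" if there is none.
func enclosing(path []promql.Node) string {
    for i := len(path) - 1; i >= 0; i-- {
        switch n := path[i].(type) {
        case *promql.Call:
            return n.Func.Name
        case *promql.AggregateExpr:
            return n.Op.String()
        }
    }
    return ""
}
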


@@ -89,6 +89,8 @@ type Query interface {
 // query implements the Query interface.
 type query struct {
+    // Underlying data provider.
+    queryable storage.Queryable
     // The original query string.
     q string
     // Statement of the parsed query.
@@ -150,26 +152,18 @@ func contextDone(ctx context.Context, env string) error {
 // Engine handles the lifetime of queries from beginning to end.
 // It is connected to a querier.
 type Engine struct {
-    // A Querier constructor against an underlying storage.
-    queryable Queryable
-    metrics   *engineMetrics
-    // The gate limiting the maximum number of concurrent and waiting queries.
-    gate    *queryGate
-    options *EngineOptions
-    logger  log.Logger
-}
-
-// Queryable allows opening a storage querier.
-type Queryable interface {
-    Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error)
+    logger  log.Logger
+    metrics *engineMetrics
+    timeout time.Duration
+    gate    *queryGate
 }

 // NewEngine returns a new engine.
-func NewEngine(queryable Queryable, o *EngineOptions) *Engine {
-    if o == nil {
-        o = DefaultEngineOptions
+func NewEngine(logger log.Logger, reg prometheus.Registerer, maxConcurrent int, timeout time.Duration) *Engine {
+    if logger == nil {
+        logger = log.NewNopLogger()
     }
     metrics := &engineMetrics{
         currentQueries: prometheus.NewGauge(prometheus.GaugeOpts{
             Namespace: namespace,
@@ -212,10 +206,10 @@ func NewEngine(queryable Queryable, o *EngineOptions) *Engine {
             ConstLabels: prometheus.Labels{"slice": "result_sort"},
         }),
     }
-    metrics.maxConcurrentQueries.Set(float64(o.MaxConcurrentQueries))

-    if o.Metrics != nil {
-        o.Metrics.MustRegister(
+    metrics.maxConcurrentQueries.Set(float64(maxConcurrent))

+    if reg != nil {
+        reg.MustRegister(
             metrics.currentQueries,
             metrics.maxConcurrentQueries,
             metrics.queryInnerEval,
@@ -225,36 +219,20 @@ func NewEngine(queryable Queryable, o *EngineOptions) *Engine {
         )
     }
     return &Engine{
-        queryable: queryable,
-        gate:      newQueryGate(o.MaxConcurrentQueries),
-        options:   o,
-        logger:    o.Logger,
-        metrics:   metrics,
+        gate:    newQueryGate(maxConcurrent),
+        timeout: timeout,
+        logger:  logger,
+        metrics: metrics,
     }
 }

-// EngineOptions contains configuration parameters for an Engine.
-type EngineOptions struct {
-    MaxConcurrentQueries int
-    Timeout              time.Duration
-    Logger               log.Logger
-    Metrics              prometheus.Registerer
-}
-
-// DefaultEngineOptions are the default engine options.
-var DefaultEngineOptions = &EngineOptions{
-    MaxConcurrentQueries: 20,
-    Timeout:              2 * time.Minute,
-    Logger:               log.NewNopLogger(),
-}
-
 // NewInstantQuery returns an evaluation query for the given expression at the given time.
-func (ng *Engine) NewInstantQuery(qs string, ts time.Time) (Query, error) {
+func (ng *Engine) NewInstantQuery(q storage.Queryable, qs string, ts time.Time) (Query, error) {
     expr, err := ParseExpr(qs)
     if err != nil {
         return nil, err
     }
-    qry := ng.newQuery(expr, ts, ts, 0)
+    qry := ng.newQuery(q, expr, ts, ts, 0)
     qry.q = qs

     return qry, nil
@@ -262,7 +240,7 @@ func (ng *Engine) NewInstantQuery(qs string, ts time.Time) (Query, error) {
 // NewRangeQuery returns an evaluation query for the given time range and with
 // the resolution set by the interval.
-func (ng *Engine) NewRangeQuery(qs string, start, end time.Time, interval time.Duration) (Query, error) {
+func (ng *Engine) NewRangeQuery(q storage.Queryable, qs string, start, end time.Time, interval time.Duration) (Query, error) {
     expr, err := ParseExpr(qs)
     if err != nil {
         return nil, err
@@ -270,13 +248,13 @@ func (ng *Engine) NewRangeQuery(qs string, start, end time.Time, interval time.D
     if expr.Type() != ValueTypeVector && expr.Type() != ValueTypeScalar {
         return nil, fmt.Errorf("invalid expression type %q for range query, must be Scalar or instant Vector", documentedType(expr.Type()))
     }
-    qry := ng.newQuery(expr, start, end, interval)
+    qry := ng.newQuery(q, expr, start, end, interval)
     qry.q = qs

     return qry, nil
 }

-func (ng *Engine) newQuery(expr Expr, start, end time.Time, interval time.Duration) *query {
+func (ng *Engine) newQuery(q storage.Queryable, expr Expr, start, end time.Time, interval time.Duration) *query {
     es := &EvalStmt{
         Expr:  expr,
         Start: start,
@@ -284,9 +262,10 @@ func (ng *Engine) newQuery(expr Expr, start, end time.Time, interval time.Durati
         Interval: interval,
     }
     qry := &query{
-        stmt:  es,
-        ng:    ng,
-        stats: stats.NewTimerGroup(),
+        stmt:      es,
+        ng:        ng,
+        stats:     stats.NewTimerGroup(),
+        queryable: q,
     }
     return qry
 }
@@ -316,7 +295,7 @@ func (ng *Engine) exec(ctx context.Context, q *query) (Value, error) {
     ng.metrics.currentQueries.Inc()
     defer ng.metrics.currentQueries.Dec()

-    ctx, cancel := context.WithTimeout(ctx, ng.options.Timeout)
+    ctx, cancel := context.WithTimeout(ctx, ng.timeout)
     q.cancel = cancel

     execTimer := q.stats.GetTimer(stats.ExecTotalTime).Start()
@@ -363,9 +342,8 @@ func durationMilliseconds(d time.Duration) int64 {
 // execEvalStmt evaluates the expression of an evaluation statement for the given time range.
 func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *EvalStmt) (Value, error) {
     prepareTimer := query.stats.GetTimer(stats.QueryPreparationTime).Start()
-    querier, err := ng.populateIterators(ctx, s)
+    querier, err := ng.populateIterators(ctx, query.queryable, s)
     prepareTimer.Stop()
     ng.metrics.queryPrepareTime.Observe(prepareTimer.ElapsedTime().Seconds())
@@ -489,10 +467,9 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *EvalStmt) (
     return mat, nil
 }

-func (ng *Engine) populateIterators(ctx context.Context, s *EvalStmt) (storage.Querier, error) {
+func (ng *Engine) populateIterators(ctx context.Context, q storage.Queryable, s *EvalStmt) (storage.Querier, error) {
     var maxOffset time.Duration
-    Inspect(s.Expr, func(node Node) bool {
+    Inspect(s.Expr, func(node Node, _ []Node) bool {
         switch n := node.(type) {
         case *VectorSelector:
             if maxOffset < LookbackDelta {
@@ -514,15 +491,21 @@ func (ng *Engine) populateIterators(ctx context.Context, s *EvalStmt) (storage.Q
     mint := s.Start.Add(-maxOffset)

-    querier, err := ng.queryable.Querier(ctx, timestamp.FromTime(mint), timestamp.FromTime(s.End))
+    querier, err := q.Querier(ctx, timestamp.FromTime(mint), timestamp.FromTime(s.End))
     if err != nil {
         return nil, err
     }

-    Inspect(s.Expr, func(node Node) bool {
+    Inspect(s.Expr, func(node Node, path []Node) bool {
+        params := &storage.SelectParams{
+            Step: int64(s.Interval / time.Millisecond),
+        }
+
         switch n := node.(type) {
         case *VectorSelector:
-            set, err := querier.Select(n.LabelMatchers...)
+            params.Func = extractFuncFromPath(path)
+            set, err := querier.Select(params, n.LabelMatchers...)
             if err != nil {
                 level.Error(ng.logger).Log("msg", "error selecting series set", "err", err)
                 return false
@@ -539,7 +522,9 @@ func (ng *Engine) populateIterators(ctx context.Context, s *EvalStmt) (storage.Q
             }
         case *MatrixSelector:
-            set, err := querier.Select(n.LabelMatchers...)
+            params.Func = extractFuncFromPath(path)
+            set, err := querier.Select(params, n.LabelMatchers...)
             if err != nil {
                 level.Error(ng.logger).Log("msg", "error selecting series set", "err", err)
                 return false
@@ -559,6 +544,25 @@ func (ng *Engine) populateIterators(ctx context.Context, s *EvalStmt) (storage.Q
     return querier, err
 }

+// extractFuncFromPath walks up the path and searches for the first instance of
+// a function or aggregation.
+func extractFuncFromPath(p []Node) string {
+    if len(p) == 0 {
+        return ""
+    }
+    switch n := p[len(p)-1].(type) {
+    case *AggregateExpr:
+        return n.Op.String()
+    case *Call:
+        return n.Func.Name
+    case *BinaryExpr:
+        // If we hit a binary expression we terminate since we only care about functions
+        // or aggregations over a single metric.
+        return ""
+    }
+    return extractFuncFromPath(p[:len(p)-1])
+}
+
 func expandSeriesSet(it storage.SeriesSet) (res []storage.Series, err error) {
     for it.Next() {
         res = append(res, it.At())
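
With these changes the engine no longer owns a Queryable: it is constructed from a logger, a registerer, a concurrency limit, and a timeout, and a storage.Queryable is handed to each query. A minimal sketch of the new call pattern (not from this commit), using storage.QueryableFunc and the no-op querier as a stand-in for real storage:

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/prometheus/prometheus/promql"
    "github.com/prometheus/prometheus/storage"
)

func main() {
    // A storage.Queryable built from a plain function; it only ever returns
    // the no-op querier, so every query evaluates over empty data.
    queryable := storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
        return storage.NoopQuerier(), nil
    })

    // nil logger and registerer are allowed; 20 concurrent queries, 2m timeout.
    engine := promql.NewEngine(nil, nil, 20, 2*time.Minute)

    qry, err := engine.NewInstantQuery(queryable, "up", time.Now())
    if err != nil {
        panic(err)
    }
    res := qry.Exec(context.Background())
    fmt.Println(res.Err, res.Value)
}
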


@@ -25,7 +25,9 @@ import (
 )

 func TestQueryConcurrency(t *testing.T) {
-    engine := NewEngine(nil, nil)
+    concurrentQueries := 10
+    engine := NewEngine(nil, nil, concurrentQueries, 10*time.Second)
+
     ctx, cancelCtx := context.WithCancel(context.Background())
     defer cancelCtx()
@@ -38,7 +40,7 @@ func TestQueryConcurrency(t *testing.T) {
         return nil
     }

-    for i := 0; i < DefaultEngineOptions.MaxConcurrentQueries; i++ {
+    for i := 0; i < concurrentQueries; i++ {
         q := engine.newTestQuery(f)
         go q.Exec(ctx)
         select {
@@ -70,16 +72,13 @@ func TestQueryConcurrency(t *testing.T) {
     }

     // Terminate remaining queries.
-    for i := 0; i < DefaultEngineOptions.MaxConcurrentQueries; i++ {
+    for i := 0; i < concurrentQueries; i++ {
         block <- struct{}{}
     }
 }

 func TestQueryTimeout(t *testing.T) {
-    engine := NewEngine(nil, &EngineOptions{
-        Timeout:              5 * time.Millisecond,
-        MaxConcurrentQueries: 20,
-    })
+    engine := NewEngine(nil, nil, 20, 5*time.Millisecond)
     ctx, cancelCtx := context.WithCancel(context.Background())
     defer cancelCtx()
@@ -98,7 +97,7 @@ func TestQueryTimeout(t *testing.T) {
 }

 func TestQueryCancel(t *testing.T) {
-    engine := NewEngine(nil, nil)
+    engine := NewEngine(nil, nil, 10, 10*time.Second)
     ctx, cancelCtx := context.WithCancel(context.Background())
     defer cancelCtx()
@@ -144,7 +143,7 @@ func TestQueryCancel(t *testing.T) {
 }

 func TestEngineShutdown(t *testing.T) {
-    engine := NewEngine(nil, nil)
+    engine := NewEngine(nil, nil, 10, 10*time.Second)
     ctx, cancelCtx := context.WithCancel(context.Background())
     block := make(chan struct{})
@@ -276,9 +275,9 @@ load 10s
             var err error
             var qry Query
             if c.Interval == 0 {
-                qry, err = test.QueryEngine().NewInstantQuery(c.Query, c.Start)
+                qry, err = test.QueryEngine().NewInstantQuery(test.Queryable(), c.Query, c.Start)
             } else {
-                qry, err = test.QueryEngine().NewRangeQuery(c.Query, c.Start, c.End, c.Interval)
+                qry, err = test.QueryEngine().NewRangeQuery(test.Queryable(), c.Query, c.Start, c.End, c.Interval)
             }
             if err != nil {
                 t.Fatalf("unexpected error creating query: %q", err)


@@ -16,6 +16,7 @@ package promql
 import (
     "context"
     "testing"
+    "time"

     "github.com/prometheus/prometheus/pkg/labels"
     "github.com/prometheus/prometheus/pkg/timestamp"
@@ -85,7 +86,7 @@ func TestDeriv(t *testing.T) {
     // so we test it by hand.
     storage := testutil.NewStorage(t)
     defer storage.Close()
-    engine := NewEngine(storage, nil)
+    engine := NewEngine(nil, nil, 10, 10*time.Second)

     a, err := storage.Appender()
     if err != nil {
@@ -100,7 +101,7 @@ func TestDeriv(t *testing.T) {
         t.Fatal(err)
     }

-    query, err := engine.NewInstantQuery("deriv(foo[30m])", timestamp.Time(1493712846939))
+    query, err := engine.NewInstantQuery(storage, "deriv(foo[30m])", timestamp.Time(1493712846939))
     if err != nil {
         t.Fatalf("Error parsing query: %s", err)
     }


@@ -83,6 +83,11 @@ func (t *Test) QueryEngine() *Engine {
     return t.queryEngine
 }

+// Queryable allows querying the test data.
+func (t *Test) Queryable() storage.Queryable {
+    return t.storage
+}
+
 // Context returns the test's context.
 func (t *Test) Context() context.Context {
     return t.context
@@ -460,7 +465,7 @@ func (t *Test) exec(tc testCommand) error {
         }

     case *evalCmd:
-        q := t.queryEngine.newQuery(cmd.expr, cmd.start, cmd.end, cmd.interval)
+        q := t.queryEngine.newQuery(t.storage, cmd.expr, cmd.start, cmd.end, cmd.interval)
         res := q.Exec(t.context)
         if res.Err != nil {
             if cmd.fail {
@@ -495,7 +500,7 @@ func (t *Test) clear() {
     }

     t.storage = testutil.NewStorage(t)
-    t.queryEngine = NewEngine(t.storage, nil)
+    t.queryEngine = NewEngine(nil, nil, 20, 10*time.Second)
     t.context, t.cancelCtx = context.WithCancel(context.Background())
 }


@@ -106,9 +106,9 @@ type QueryFunc func(ctx context.Context, q string, t time.Time) (promql.Vector,
 // EngineQueryFunc returns a new query function that executes instant queries against
 // the given engine.
 // It converts scaler into vector results.
-func EngineQueryFunc(engine *promql.Engine) QueryFunc {
+func EngineQueryFunc(engine *promql.Engine, q storage.Queryable) QueryFunc {
     return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
-        q, err := engine.NewInstantQuery(qs, t)
+        q, err := engine.NewInstantQuery(q, qs, t)
         if err != nil {
             return nil, err
         }


@@ -144,7 +144,7 @@ func TestAlertingRule(t *testing.T) {
         evalTime := baseTime.Add(test.time)

-        res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine()), nil)
+        res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil)
         testutil.Ok(t, err)

         for i := range test.result {
@@ -174,9 +174,9 @@ func annotateWithTime(lines []string, ts time.Time) []string {
 func TestStaleness(t *testing.T) {
     storage := testutil.NewStorage(t)
     defer storage.Close()
-    engine := promql.NewEngine(storage, nil)
+    engine := promql.NewEngine(nil, nil, 10, 10*time.Second)
     opts := &ManagerOptions{
-        QueryFunc:  EngineQueryFunc(engine),
+        QueryFunc:  EngineQueryFunc(engine, storage),
         Appendable: storage,
         Context:    context.Background(),
         Logger:     log.NewNopLogger(),
@@ -210,7 +210,7 @@ func TestStaleness(t *testing.T) {
     matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a_plus_one")
     testutil.Ok(t, err)

-    set, err := querier.Select(matcher)
+    set, err := querier.Select(nil, matcher)
     testutil.Ok(t, err)

     samples, err := readSeriesSet(set)


@@ -28,7 +28,7 @@ func TestRuleEval(t *testing.T) {
     storage := testutil.NewStorage(t)
     defer storage.Close()

-    engine := promql.NewEngine(storage, nil)
+    engine := promql.NewEngine(nil, nil, 10, 10*time.Second)
     ctx, cancelCtx := context.WithCancel(context.Background())
     defer cancelCtx()
@@ -62,7 +62,7 @@ func TestRuleEval(t *testing.T) {
     for _, test := range suite {
         rule := NewRecordingRule(test.name, test.expr, test.labels)
-        result, err := rule.Eval(ctx, now, EngineQueryFunc(engine), nil)
+        result, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil)
         testutil.Ok(t, err)
         testutil.Equals(t, result, test.result)
     }


@@ -216,10 +216,10 @@ func NewMergeQuerier(queriers []Querier) Querier {
 }

 // Select returns a set of series that matches the given label matchers.
-func (q *mergeQuerier) Select(matchers ...*labels.Matcher) (SeriesSet, error) {
+func (q *mergeQuerier) Select(params *SelectParams, matchers ...*labels.Matcher) (SeriesSet, error) {
     seriesSets := make([]SeriesSet, 0, len(q.queriers))
     for _, querier := range q.queriers {
-        set, err := querier.Select(matchers...)
+        set, err := querier.Select(params, matchers...)
         if err != nil {
             return nil, err
         }


@@ -52,7 +52,7 @@ type Queryable interface {
 // Querier provides reading access to time series data.
 type Querier interface {
     // Select returns a set of series that matches the given label matchers.
-    Select(...*labels.Matcher) (SeriesSet, error)
+    Select(*SelectParams, ...*labels.Matcher) (SeriesSet, error)

     // LabelValues returns all potential values for a label name.
     LabelValues(name string) ([]string, error)
@@ -61,6 +61,12 @@ type Querier interface {
     Close() error
 }

+// SelectParams specifies parameters passed to data selections.
+type SelectParams struct {
+    Step int64  // Query step size in milliseconds.
+    Func string // String representation of surrounding function or aggregation.
+}
+
 // QueryableFunc is an adapter to allow the use of ordinary functions as
 // Queryables. It follows the idea of http.HandlerFunc.
 type QueryableFunc func(ctx context.Context, mint, maxt int64) (Querier, error)
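
For third-party Querier implementations the change is mechanical: Select gains a leading *SelectParams argument, which may be nil (several call sites in this commit pass nil). A hypothetical wrapper that logs the hint before delegating might look like the sketch below; hintLoggingQuerier is not part of this commit.

package main

import (
    "log"

    "github.com/prometheus/prometheus/pkg/labels"
    "github.com/prometheus/prometheus/storage"
)

// hintLoggingQuerier is a hypothetical storage.Querier wrapper that logs the
// SelectParams it receives and then delegates to the wrapped querier.
type hintLoggingQuerier struct {
    storage.Querier
}

func (q hintLoggingQuerier) Select(p *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, error) {
    if p != nil {
        log.Printf("select hint: func=%q step=%dms", p.Func, p.Step)
    }
    return q.Querier.Select(p, matchers...)
}

func main() {
    q := hintLoggingQuerier{Querier: storage.NoopQuerier()}
    defer q.Close()

    m, err := labels.NewMatcher(labels.MatchEqual, "__name__", "up")
    if err != nil {
        log.Fatal(err)
    }

    // A nil SelectParams is valid, mirroring several call sites in this commit.
    set, err := q.Select(nil, m)
    log.Println(set, err)
}
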


@@ -22,7 +22,7 @@ func NoopQuerier() Querier {
     return noopQuerier{}
 }

-func (noopQuerier) Select(...*labels.Matcher) (SeriesSet, error) {
+func (noopQuerier) Select(*SelectParams, ...*labels.Matcher) (SeriesSet, error) {
     return NoopSeriesSet(), nil
 }


@@ -43,7 +43,7 @@ type querier struct {
 // Select implements storage.Querier and uses the given matchers to read series
 // sets from the Client.
-func (q *querier) Select(matchers ...*labels.Matcher) (storage.SeriesSet, error) {
+func (q *querier) Select(_ *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, error) {
     query, err := ToQuery(q.mint, q.maxt, matchers)
     if err != nil {
         return nil, err
@@ -91,9 +91,9 @@ type externalLabelsQuerier struct {
 // Select adds equality matchers for all external labels to the list of matchers
 // before calling the wrapped storage.Queryable. The added external labels are
 // removed from the returned series sets.
-func (q externalLabelsQuerier) Select(matchers ...*labels.Matcher) (storage.SeriesSet, error) {
+func (q externalLabelsQuerier) Select(p *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, error) {
     m, added := q.addExternalLabels(matchers)
-    s, err := q.Querier.Select(m...)
+    s, err := q.Querier.Select(p, m...)
     if err != nil {
         return nil, err
     }
@@ -144,7 +144,7 @@ type requiredMatchersQuerier struct {
 // Select returns a NoopSeriesSet if the given matchers don't match the label
 // set of the requiredMatchersQuerier. Otherwise it'll call the wrapped querier.
-func (q requiredMatchersQuerier) Select(matchers ...*labels.Matcher) (storage.SeriesSet, error) {
+func (q requiredMatchersQuerier) Select(p *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, error) {
     ms := q.requiredMatchers
     for _, m := range matchers {
         for i, r := range ms {
@@ -160,7 +160,7 @@ func (q requiredMatchersQuerier) Select(matchers ...*labels.Matcher) (storage.Se
     if len(ms) > 0 {
         return storage.NoopSeriesSet(), nil
     }
-    return q.Querier.Select(matchers...)
+    return q.Querier.Select(p, matchers...)
 }

 // addExternalLabels adds matchers for each external label. External labels


@@ -42,7 +42,7 @@ func TestExternalLabelsQuerierSelect(t *testing.T) {
         externalLabels: model.LabelSet{"region": "europe"},
     }
     want := newSeriesSetFilter(mockSeriesSet{}, q.externalLabels)
-    have, err := q.Select(matchers...)
+    have, err := q.Select(nil, matchers...)
     if err != nil {
         t.Error(err)
     }
@@ -157,7 +157,7 @@ type mockSeriesSet struct {
     storage.SeriesSet
 }

-func (mockQuerier) Select(...*labels.Matcher) (storage.SeriesSet, error) {
+func (mockQuerier) Select(*storage.SelectParams, ...*labels.Matcher) (storage.SeriesSet, error) {
     return mockSeriesSet{}, nil
 }
@@ -313,7 +313,7 @@ func TestRequiredLabelsQuerierSelect(t *testing.T) {
             requiredMatchers: test.requiredMatchers,
         }

-        have, err := q.Select(test.matchers...)
+        have, err := q.Select(nil, test.matchers...)
         if err != nil {
             t.Error(err)
         }


@@ -188,7 +188,7 @@ type querier struct {
     q tsdb.Querier
 }

-func (q querier) Select(oms ...*labels.Matcher) (storage.SeriesSet, error) {
+func (q querier) Select(_ *storage.SelectParams, oms ...*labels.Matcher) (storage.SeriesSet, error) {
     ms := make([]tsdbLabels.Matcher, 0, len(oms))

     for _, om := range oms {


@@ -108,7 +108,7 @@ type apiFunc func(r *http.Request) (interface{}, *apiError)
 // API can register a set of endpoints in a router and handle
 // them using the provided storage and query engine.
 type API struct {
-    Queryable   promql.Queryable
+    Queryable   storage.Queryable
     QueryEngine *promql.Engine

     targetRetriever targetRetriever
@@ -125,7 +125,7 @@ type API struct {
 // NewAPI returns an initialized API type.
 func NewAPI(
     qe *promql.Engine,
-    q promql.Queryable,
+    q storage.Queryable,
     tr targetRetriever,
     ar alertmanagerRetriever,
     configFunc func() config.Config,
@@ -222,7 +222,7 @@ func (api *API) query(r *http.Request) (interface{}, *apiError) {
         defer cancel()
     }

-    qry, err := api.QueryEngine.NewInstantQuery(r.FormValue("query"), ts)
+    qry, err := api.QueryEngine.NewInstantQuery(api.Queryable, r.FormValue("query"), ts)
     if err != nil {
         return nil, &apiError{errorBadData, err}
     }
@@ -296,7 +296,7 @@ func (api *API) queryRange(r *http.Request) (interface{}, *apiError) {
         defer cancel()
     }

-    qry, err := api.QueryEngine.NewRangeQuery(r.FormValue("query"), start, end, step)
+    qry, err := api.QueryEngine.NewRangeQuery(api.Queryable, r.FormValue("query"), start, end, step)
     if err != nil {
         return nil, &apiError{errorBadData, err}
     }
@@ -396,7 +396,7 @@ func (api *API) series(r *http.Request) (interface{}, *apiError) {
     var sets []storage.SeriesSet
     for _, mset := range matcherSets {
-        s, err := q.Select(mset...)
+        s, err := q.Select(nil, mset...)
         if err != nil {
             return nil, &apiError{errorExec, err}
         }
@@ -537,7 +537,7 @@ func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) {
         }
     }

-    set, err := querier.Select(filteredMatchers...)
+    set, err := querier.Select(nil, filteredMatchers...)
     if err != nil {
         http.Error(w, err.Error(), http.StatusInternalServerError)
         return


@@ -74,7 +74,7 @@ func (h *Handler) federation(w http.ResponseWriter, req *http.Request) {
     var sets []storage.SeriesSet
     for _, mset := range matcherSets {
-        s, err := q.Select(mset...)
+        s, err := q.Select(nil, mset...)
         if err != nil {
             federationErrors.Inc()
             http.Error(w, err.Error(), http.StatusInternalServerError)


@@ -536,7 +536,7 @@ func (h *Handler) consoles(w http.ResponseWriter, r *http.Request) {
         "__console_"+name,
         data,
         h.now(),
-        template.QueryFunc(rules.EngineQueryFunc(h.queryEngine)),
+        template.QueryFunc(rules.EngineQueryFunc(h.queryEngine, h.storage)),
         h.options.ExternalURL,
     )

     filenames, err := filepath.Glob(h.options.ConsoleLibrariesPath + "/*.lib")
@@ -766,7 +766,7 @@ func (h *Handler) executeTemplate(w http.ResponseWriter, name string, data inter
         name,
         data,
         h.now(),
-        template.QueryFunc(rules.EngineQueryFunc(h.queryEngine)),
+        template.QueryFunc(rules.EngineQueryFunc(h.queryEngine, h.storage)),
         h.options.ExternalURL,
     )
     tmpl.Funcs(tmplFuncs(h.consolesPath(), h.options))