Commit 72d7b325a1:
This allows changing the time offset for individual instant and range
vectors in a query. For example, this returns the value of `foo` 5 minutes
in the past relative to the current query evaluation time:

    foo offset 5m

Note that the `offset` modifier always needs to follow the selector
immediately. I.e. the following would be correct:

    sum(foo offset 5m) // GOOD.

While the following would be *incorrect*:

    sum(foo) offset 5m // INVALID.

The same works for range vectors. This returns the 5-minute rate that
`foo` had a week ago:

    rate(foo[5m] offset 1w)

This change touches the following components:

* Lexer/parser: additions to correctly parse the new `offset`/`OFFSET`
  keyword.
* AST: vector and matrix nodes now have an additional `offset` field.
  This is used during their evaluation to adjust query and result times
  appropriately.
* Query analyzer: now works on separate sets of ranges and instants per
  offset. Isolating different offsets from each other completely in this
  way keeps the preloading code relatively simple.

No storage engine changes were needed by this change.

The rules tests have been changed to not probe the internal
implementation details of the query analyzer anymore (how many instants
and ranges have been preloaded). This would also become too cumbersome to
test with the new model, and measuring the result of the query should be
sufficient.

This fixes https://github.com/prometheus/prometheus/issues/529
This fixes https://github.com/prometheus/promdash/issues/201
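The AST side of the change can be pictured with a minimal Go sketch. The
type and field names below are illustrative stand-ins, not the actual
Prometheus AST: the idea is simply that each vector or matrix node records
an offset, and evaluation shifts the node's timestamp into the past by
that amount.

    package main

    import (
    	"fmt"
    	"time"
    )

    // VectorSelector stands in for an AST vector node; only the
    // hypothetical Offset field matters for this sketch.
    type VectorSelector struct {
    	Name   string
    	Offset time.Duration // zero when no offset modifier is given
    }

    // evalTimestamp shifts the query evaluation time into the past by
    // the selector's offset.
    func (vs *VectorSelector) evalTimestamp(queryTime time.Time) time.Time {
    	return queryTime.Add(-vs.Offset)
    }

    func main() {
    	sel := &VectorSelector{Name: "foo", Offset: 5 * time.Minute}
    	// Data is fetched as of five minutes before the query time.
    	fmt.Println(sel.evalTimestamp(time.Now()))
    }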
112 lines
3.7 KiB
Plaintext
/* Copyright 2013 The Prometheus Authors
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

%{
package rules

import (
	"fmt"
	"strconv"
	"strings"

	clientmodel "github.com/prometheus/client_golang/model"
)

// Lex is called by the parser generated by "go tool yacc" to obtain each
// token. The method is opened before the matching rules block and closed at
// the end of the file.
func (lexer *RulesLexer) Lex(lval *yySymType) int {
	// Internal lexer states.
	const (
		S_INITIAL = iota
		S_COMMENTS
	)

	// We simulate multiple start symbols for closely-related grammars via dummy tokens. See
	// http://www.gnu.org/software/bison/manual/html_node/Multiple-start_002dsymbols.html
	// Reason: we want to be able to parse lists of named rules as well as single expressions.
	if lexer.startToken != 0 {
		startToken := lexer.startToken
		lexer.startToken = 0
		return startToken
	}
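	// (lexer.startToken is set once by the caller; returning it here as
	// the very first token steers the parser into the desired sub-grammar.)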

	c := lexer.current
	currentState := 0

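	// Fetch a fresh input character if the previous one has already
	// been consumed.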
	if lexer.empty {
		c, lexer.empty = lexer.getChar(), false
	}
%}

D                       [0-9]
L                       [a-zA-Z_]
M                       [a-zA-Z_:]
U                       [smhdwy]
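
/* D: digits, L: identifier characters, M: metric name characters
   (identifiers plus ':'), U: duration units (s, m, h, d, w, y —
   seconds, minutes, hours, days, weeks, years). */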

%x S_COMMENTS

%yyc c
%yyn c = lexer.getChar()
%yyt currentState
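
/* golex directives: %yyc names the current-character variable, %yyn gives
   the statement that advances to the next character, and %yyt names the
   variable holding the current start condition. */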

%%
	lexer.buf = lexer.buf[:0]	// Code before the first rule is executed before every scan cycle (rule #0 / state 0 action).

"/*" currentState = S_COMMENTS
|
|
<S_COMMENTS>"*/" currentState = S_INITIAL
|
|
<S_COMMENTS>.|\n /* ignore chars within multi-line comments */
|
|
|
|
\/\/[^\r\n]*\n /* gobble up one-line comments */
|
|
|
|
ALERT|alert             return ALERT
IF|if                   return IF
FOR|for                 return FOR
WITH|with               return WITH
SUMMARY|summary         return SUMMARY
DESCRIPTION|description return DESCRIPTION

PERMANENT|permanent     return PERMANENT
BY|by                   return GROUP_OP
KEEPING_EXTRA|keeping_extra return KEEPING_EXTRA
OFFSET|offset           return OFFSET
AVG|SUM|MAX|MIN|COUNT   lval.str = lexer.token(); return AGGR_OP
avg|sum|max|min|count   lval.str = strings.ToUpper(lexer.token()); return AGGR_OP
\<|>|AND|OR|and|or      lval.str = strings.ToUpper(lexer.token()); return CMP_OP
==|!=|>=|<=|=~|!~       lval.str = lexer.token(); return CMP_OP
[+\-]                   lval.str = lexer.token(); return ADDITIVE_OP
[*/%]                   lval.str = lexer.token(); return MULT_OP

{D}+{U}                 lval.str = lexer.token(); return DURATION
{L}({L}|{D})*           lval.str = lexer.token(); return IDENTIFIER
{M}({M}|{D})*           lval.str = lexer.token(); return METRICNAME

\-?{D}+(\.{D}*)?        num, err := strconv.ParseFloat(lexer.token(), 64)
                        if err != nil && err.(*strconv.NumError).Err == strconv.ErrSyntax {
                                panic("Invalid float")
                        }
                        lval.num = clientmodel.SampleValue(num)
                        return NUMBER

\"(\\.|[^\\"])*\" lval.str = lexer.token()[1:len(lexer.token()) - 1]; return STRING
|
|
\'(\\.|[^\\'])*\' lval.str = lexer.token()[1:len(lexer.token()) - 1]; return STRING
|
|
|
|
[{}\[\]()=,]            return int(lexer.buf[0])
[\t\n\r ]               /* gobble up any whitespace */
%%
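
	// No rule matched: consume the current character and return it to
	// the parser as its literal token value.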
	lexer.empty = true
	return int(c)
}