/* Copyright 2013 The Prometheus Authors
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

%{
package rules

import (
	"fmt"
	"strconv"
	"strings"

	clientmodel "github.com/prometheus/client_golang/model"
)
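
// Background (assumed from how "go tool yacc" works rather than stated in
// this file): the generated parser drives any value implementing the
// yyLexer interface, i.e. Lex(lval *yySymType) int and Error(s string),
// so RulesLexer presumably defines Error elsewhere in this package.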

// Lex is called by the parser generated by "go tool yacc" to obtain each
// token. The method is opened before the matching rules block and closed at
// the end of the file.
func (lexer *RulesLexer) Lex(lval *yySymType) int {
	// Internal lexer states.
	const (
		S_INITIAL = iota
		S_COMMENTS
	)

	// We simulate multiple start symbols for closely-related grammars via dummy tokens. See
	// http://www.gnu.org/software/bison/manual/html_node/Multiple-start_002dsymbols.html
	// Reason: we want to be able to parse lists of named rules as well as single expressions.
	if lexer.startToken != 0 {
		startToken := lexer.startToken
		lexer.startToken = 0
		return startToken
	}
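
	// Illustration (these token names are assumptions, following the bison
	// technique linked above): a caller parsing a whole rule file would seed
	// lexer.startToken with a dummy token such as START_RULES, while one
	// parsing a standalone expression would seed something like
	// START_EXPRESSION, letting a single grammar serve both entry points.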

	c := lexer.current
	currentState := 0

	if lexer.empty {
		c, lexer.empty = lexer.getChar(), false
	}

%}

D                       [0-9]
L                       [a-zA-Z_]
M                       [a-zA-Z_:]
U                       [smhdwy]

FLOAT                   ({D}*\.?{D}+|{D}+\.?{D}*){EXPONENT}?|[+-]?[iI][nN][fF]|[nN][aA][nN]
EXPONENT                [eE][-+]?[0-9]+
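
        // Reading the FLOAT pattern above: it accepts "5", "2.5", ".5", "2."
        // and exponent forms such as "1e-3" or "2.5E4", plus Inf and NaN in
        // any letter case. Only Inf takes an optional sign here; a leading
        // +/- on ordinary numbers is matched by the [+\-] rule further down.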

STR                     \"(\\.|[^\\"])*\"|\'(\\.|[^\\'])*\'
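
        // STR admits both quoting styles with backslash escapes, e.g.
        // "foo\"bar" or 'it\'s'; the matched token still carries its
        // surrounding quotes at this point.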

%x S_COMMENTS

%yyc c
%yyn c = lexer.getChar()
%yyt currentState
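
        // These directives follow golex convention (an assumption, since this
        // file does not define them): %yyc names the current-character
        // expression, %yyn the statement that advances to the next character,
        // and %yyt the expression holding the current start condition.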

%%
	lexer.buf = lexer.buf[:0]   // Code before the first rule is executed at the start of every scan cycle (rule #0 / state 0 action).

"/*"                    currentState = S_COMMENTS
<S_COMMENTS>"*/"        currentState = S_INITIAL
<S_COMMENTS>.|\n        /* ignore chars within multi-line comments */

\/\/[^\r\n]*\n          /* gobble up one-line comments */

ALERT|alert             return ALERT
IF|if                   return IF
FOR|for                 return FOR
WITH|with               return WITH
SUMMARY|summary         return SUMMARY
DESCRIPTION|description return DESCRIPTION

PERMANENT|permanent     return PERMANENT
BY|by                   return GROUP_OP
ON|on                   return MATCH_OP
GROUP_LEFT|GROUP_RIGHT  lval.str = lexer.token(); return MATCH_MOD
group_left|group_right  lval.str = strings.ToUpper(lexer.token()); return MATCH_MOD
KEEPING_EXTRA|keeping_extra return KEEPING_EXTRA
OFFSET|offset           return OFFSET
AVG|SUM|MAX|MIN|COUNT|STDVAR|STDDEV lval.str = lexer.token(); return AGGR_OP
avg|sum|max|min|count|stdvar|stddev lval.str = strings.ToUpper(lexer.token()); return AGGR_OP
\<|>|AND|OR|and|or      lval.str = strings.ToUpper(lexer.token()); return CMP_OP
==|!=|>=|<=|=~|!~       lval.str = lexer.token(); return CMP_OP
[+\-]                   lval.str = lexer.token(); return ADDITIVE_OP
[*/%]                   lval.str = lexer.token(); return MULT_OP

{FLOAT}                 num, err := strconv.ParseFloat(lexer.token(), 64)
                        if err != nil && err.(*strconv.NumError).Err == strconv.ErrSyntax {
                                panic("Invalid float")
                        }
                        lval.num = clientmodel.SampleValue(num)
                        return NUMBER

{D}+{U}                 lval.str = lexer.token(); return DURATION
{L}({L}|{D})*           lval.str = lexer.token(); return IDENTIFIER
{M}({M}|{D})*           lval.str = lexer.token(); return METRICNAME
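                        // Examples, read off the classes above: "5m" and "2d" lex as
                        // DURATION (units smhdwy), "foo_bar" as IDENTIFIER, and
                        // "job:request_rate" as METRICNAME. A colon-free name matches
                        // both of the last two patterns at equal length, so the earlier
                        // IDENTIFIER rule wins the tie.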

{STR}                   lval.str = lexer.token()[1:len(lexer.token()) - 1]; return STRING
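                        // The slice above strips only the surrounding quotes; backslash
                        // escape sequences inside are passed to the parser untouched.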

[{}\[\]()=,]            return int(lexer.buf[0])
[\t\n\r ]               /* gobble up any whitespace */
%%

	lexer.empty = true
	return int(c)
}