/* Copyright 2013 The Prometheus Authors
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

%{
package rules

import (
	"fmt"
	"strconv"
	"strings"

	clientmodel "github.com/prometheus/client_golang/model"
)
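
// Note (added commentary): RulesLexer itself is defined elsewhere in this
// package; from its use below it carries at least the fields current, buf,
// empty and startToken, plus a getChar method returning the next input
// character.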

// Lex is called by the parser generated by "go tool yacc" to obtain each
// token. The method is opened before the matching rules block and closed at
// the end of the file.
func (lexer *RulesLexer) Lex(lval *yySymType) int {
	// Internal lexer states.
	const (
		S_INITIAL = iota
		S_COMMENTS
	)

	// We simulate multiple start symbols for closely-related grammars via dummy tokens. See
	// http://www.gnu.org/software/bison/manual/html_node/Multiple-start_002dsymbols.html
	// Reason: we want to be able to parse lists of named rules as well as single expressions.
	if lexer.startToken != 0 {
		startToken := lexer.startToken
		lexer.startToken = 0
		return startToken
	}
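
	// Illustration (added; START_RULES and START_EXPRESSION are assumed to
	// be the dummy tokens declared in the accompanying yacc grammar, they
	// are not defined in this file):
	//
	//	lexer.startToken = START_RULES      // parse a list of named rules
	//	lexer.startToken = START_EXPRESSION // parse a single expression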

	c := lexer.current
	currentState := S_INITIAL

	// The previous scan consumed its lookahead character; fetch the next
	// one before matching resumes.
	if lexer.empty {
		c, lexer.empty = lexer.getChar(), false
	}
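
	// Gloss of the golex sections that follow (added commentary): the
	// definitions after %} introduce the macros D (digits), L (identifier
	// characters), M (metric-name characters, i.e. identifiers plus ':')
	// and U (duration unit suffixes). %x declares the exclusive start
	// condition used for block comments, while %yyc, %yyn and %yyt name
	// the current-character variable, the advance statement and the state
	// variable for the generated scanner.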
%}

D [0-9]
L [a-zA-Z_]
M [a-zA-Z_:]
U [smhdwy]

FLOAT ({D}*\.?{D}+|{D}+\.?{D}*){EXPONENT}?|[+-]?[iI][nN][fF]|[nN][aA][nN]
EXPONENT [eE][-+]?[0-9]+

STR \"(\\.|[^\\"])*\"|\'(\\.|[^\\'])*\'

%x S_COMMENTS

%yyc c
%yyn c = lexer.getChar()
%yyt currentState

%%
	lexer.buf = lexer.buf[:0] // Code placed before the first rule is executed before every scan cycle (rule #0 / state-0 action).

"/*"                                currentState = S_COMMENTS
<S_COMMENTS>"*/"                    currentState = S_INITIAL
<S_COMMENTS>.|\n                    /* ignore chars within multi-line comments */

\/\/[^\r\n]*\n                      /* gobble up one-line comments */

ALERT|alert                         return ALERT
IF|if                               return IF
FOR|for                             return FOR
WITH|with                           return WITH
SUMMARY|summary                     return SUMMARY
DESCRIPTION|description             return DESCRIPTION

PERMANENT|permanent                 return PERMANENT
BY|by                               return GROUP_OP
ON|on                               return MATCH_OP
GROUP_LEFT|GROUP_RIGHT              lval.str = lexer.token(); return MATCH_MOD
group_left|group_right              lval.str = strings.ToUpper(lexer.token()); return MATCH_MOD
KEEPING_EXTRA|keeping_extra         return KEEPING_EXTRA
OFFSET|offset                       return OFFSET
AVG|SUM|MAX|MIN|COUNT|STDVAR|STDDEV lval.str = lexer.token(); return AGGR_OP
avg|sum|max|min|count|stdvar|stddev lval.str = strings.ToUpper(lexer.token()); return AGGR_OP
\<|>|AND|OR|and|or                  lval.str = strings.ToUpper(lexer.token()); return CMP_OP
==|!=|>=|<=|=~|!~                   lval.str = lexer.token(); return CMP_OP
[+\-]                               lval.str = lexer.token(); return ADDITIVE_OP
[*/%]                               lval.str = lexer.token(); return MULT_OP

{FLOAT}                             num, err := strconv.ParseFloat(lexer.token(), 64)
                                    if err != nil && err.(*strconv.NumError).Err == strconv.ErrSyntax {
                                      // Syntax errors should be impossible here given the FLOAT
                                      // pattern; range errors are tolerated (ParseFloat then
                                      // returns ±Inf or 0, which we keep).
                                      panic("Invalid float")
                                    }
                                    lval.num = clientmodel.SampleValue(num)
                                    return NUMBER
2015-03-01 10:18:33 -08:00
|
|
|
{D}+{U} lval.str = lexer.token(); return DURATION
|
|
|
|
{L}({L}|{D})* lval.str = lexer.token(); return IDENTIFIER
|
|
|
|
{M}({M}|{D})* lval.str = lexer.token(); return METRICNAME
|
|
|
|
|
|
|
|
{STR} lval.str = lexer.token()[1:len(lexer.token()) - 1]; return STRING

[{}\[\]()=,]                        return int(lexer.buf[0]) /* single-character tokens are returned as themselves */
[\t\n\r ]                           /* gobble up any whitespace */

%%

	// No rule matched: mark the current character as consumed and hand it
	// to the parser as a single-character token.
	lexer.empty = true
	return int(c)
}
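
// Added sketch of how the lexer is driven (assumed wiring, not part of this
// file): "go tool yacc" emits yyParse, which pulls tokens by calling Lex in
// a loop, e.g.
//
//	lexer.startToken = START_RULES // dummy start token, see above
//	ret := yyParse(lexer)          // returns 0 on a successful parse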