prometheus/rules/lexer.l
Julius Volz c7c0b33d0b Add regex-matching support for labels.
There are now four label-matching ops for selecting timeseries:

- Equal: =
- NotEqual: !=
- RegexMatch: =~
- RegexNoMatch: !~

Instead of looking up labels by a simple clientmodel.LabelSet (basically
an equals op for every key/value pair in the set), timeseries
fingerprint selection is now done via a list of metric.LabelMatchers.

Change-Id: I510a83f761198e80946146770ebb64e4abc3bb96
2014-04-01 14:24:53 +02:00
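
An illustrative selector using all four operators (the metric and label names
here are invented for the example):

    api_http_requests_total{job="api", instance!="backup", path=~"/api/.*", status!~"5.."}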


/* Copyright 2013 Prometheus Team
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. */
%{
package rules
import (
"fmt"
"strconv"
"strings"
clientmodel "github.com/prometheus/client_golang/model"
)
// Lex is called by the parser generated by "go tool yacc" to obtain each
// token. The function body is opened here, before the matching rules block,
// and closed again at the very end of the file.
func (lexer *RulesLexer) Lex(lval *yySymType) int {
    // Internal lexer states.
    const (
        S_INITIAL = iota
        S_COMMENTS
    )

    // We simulate multiple start symbols for closely-related grammars via dummy tokens. See
    // http://www.gnu.org/software/bison/manual/html_node/Multiple-start_002dsymbols.html
    // Reason: we want to be able to parse lists of named rules as well as single expressions.
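    // A hypothetical driver illustrating the dummy-token trick (the start-token
    // names below are assumptions, not necessarily the ones the grammar defines):
    //
    //     lexer := &RulesLexer{startToken: START_RULES}      // parse a whole rules file
    //     lexer := &RulesLexer{startToken: START_EXPRESSION} // parse a single expression
    //     yyParse(lexer)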
    if lexer.startToken != 0 {
        startToken := lexer.startToken
        lexer.startToken = 0
        return startToken
    }
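    // lexer.current holds a one-character lookahead; if the previous call
    // consumed it (lexer.empty), fetch the next input character now.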
    c := lexer.current
    currentState := 0

    if lexer.empty {
        c, lexer.empty = lexer.getChar(), false
    }
%}
D [0-9]
L [a-zA-Z_]
M [a-zA-Z_:]
U [smhdwy]
%x S_COMMENTS
%yyc c
%yyn c = lexer.getChar()
%yyt currentState
%%
    lexer.buf = lexer.buf[:0] // Code before the first rule is executed before every scan cycle (rule #0 / state 0 action).
"/*" currentState = S_COMMENTS
<S_COMMENTS>"*/" currentState = S_INITIAL
<S_COMMENTS>.|\n /* ignore chars within multi-line comments */
\/\/[^\r\n]*\n /* gobble up one-line comments */
ALERT|alert return ALERT
IF|if return IF
FOR|for return FOR
WITH|with return WITH
SUMMARY|summary return SUMMARY
DESCRIPTION|description return DESCRIPTION
PERMANENT|permanent return PERMANENT
BY|by return GROUP_OP
KEEPING_EXTRA|keeping_extra return KEEPING_EXTRA
AVG|SUM|MAX|MIN|COUNT lval.str = lexer.token(); return AGGR_OP
avg|sum|max|min|count lval.str = strings.ToUpper(lexer.token()); return AGGR_OP
\<|>|AND|OR|and|or lval.str = strings.ToUpper(lexer.token()); return CMP_OP
==|!=|>=|<=|=~|!~ lval.str = lexer.token(); return CMP_OP
[+\-] lval.str = lexer.token(); return ADDITIVE_OP
[*/%] lval.str = lexer.token(); return MULT_OP
{D}+{U} lval.str = lexer.token(); return DURATION
{L}({L}|{D})* lval.str = lexer.token(); return IDENTIFIER
{M}({M}|{D})* lval.str = lexer.token(); return METRICNAME
\-?{D}+(\.{D}*)? num, err := strconv.ParseFloat(lexer.token(), 64);
    if err != nil && err.(*strconv.NumError).Err == strconv.ErrSyntax {
        panic("Invalid float")
    }
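    // Overflow is tolerated: on ErrRange, ParseFloat returns ±Inf, and that
    // value is stored as the sample value below.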
    lval.num = clientmodel.SampleValue(num)
    return NUMBER
\"(\\.|[^\\"])*\" lval.str = lexer.token()[1:len(lexer.token()) - 1]; return STRING
\'(\\.|[^\\'])*\' lval.str = lexer.token()[1:len(lexer.token()) - 1]; return STRING
[{}\[\]()=,] return int(lexer.buf[0])
[\t\n\r ] /* gobble up any whitespace */
%%
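    // No rule matched: mark the lookahead character as consumed and return
    // it verbatim as a single-character token.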
    lexer.empty = true
    return int(c)
}
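
For illustration, a small hypothetical rules snippet exercising the main token
classes above (keywords, metric names with colons, durations using the units
s/m/h/d/w/y, operators, and strings); all metric, label, and function names
are invented:

    job:requests:rate5m = sum(rate(api_http_requests_total[5m])) by (job)

    ALERT InstanceDown
      IF up == 0
      FOR 5m
      WITH { severity = "page" }
      SUMMARY "Instance down"
      DESCRIPTION "An instance has been unreachable for more than 5 minutes."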