// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package promql

import (
	"fmt"
	"strings"
	"unicode"
	"unicode/utf8"

	"github.com/prometheus/common/log"
)

// item represents a token or text string returned from the scanner.
type item struct {
	typ itemType // The type of this item.
	pos Pos      // The starting position, in bytes, of this item in the input string.
	val string   // The value of this item.
}

// String returns a descriptive string for the item.
func (i item) String() string {
	switch {
	case i.typ == itemEOF:
		return "EOF"
	case i.typ == itemError:
		return i.val
	case i.typ == itemIdentifier || i.typ == itemMetricIdentifier:
		return fmt.Sprintf("%q", i.val)
	case i.typ.isKeyword():
		return fmt.Sprintf("<%s>", i.val)
	case i.typ.isOperator():
		return fmt.Sprintf("<op:%s>", i.val)
	case i.typ.isAggregator():
		return fmt.Sprintf("<aggr:%s>", i.val)
	case len(i.val) > 10:
		return fmt.Sprintf("%.10q...", i.val)
	}
	return fmt.Sprintf("%q", i.val)
}
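
// For illustration, the cases above yield, for example:
//
//	item{typ: itemLAND, val: "and"}.String()  // <op:and>
//	item{typ: itemSum, val: "sum"}.String()   // <aggr:sum>
//	item{typ: itemBy, val: "by"}.String()     // <by>
//	item{typ: itemNumber, val: "1"}.String()  // "1"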

// isOperator returns true if the item corresponds to an arithmetic or set operator.
// Returns false otherwise.
func (i itemType) isOperator() bool { return i > operatorsStart && i < operatorsEnd }

// isAggregator returns true if the item belongs to the aggregator functions.
// Returns false otherwise.
func (i itemType) isAggregator() bool { return i > aggregatorsStart && i < aggregatorsEnd }

// isKeyword returns true if the item corresponds to a keyword.
// Returns false otherwise.
func (i itemType) isKeyword() bool { return i > keywordsStart && i < keywordsEnd }

// isComparisonOperator returns true if the item corresponds to a comparison operator.
// Returns false otherwise.
func (i itemType) isComparisonOperator() bool {
	switch i {
	case itemEQL, itemNEQ, itemLTE, itemLSS, itemGTE, itemGTR:
		return true
	default:
		return false
	}
}

// isSetOperator returns whether the item corresponds to a set operator.
func (i itemType) isSetOperator() bool {
	switch i {
	case itemLAND, itemLOR, itemLUnless:
		return true
	}
	return false
}

// Constants for operator precedence in expressions.
const LowestPrec = 0 // Non-operators.

// precedence returns the operator precedence of the binary
// operator op. If op is not a binary operator, the result
// is LowestPrec.
func (i itemType) precedence() int {
	switch i {
	case itemLOR:
		return 1
	case itemLAND, itemLUnless:
		return 2
	case itemEQL, itemNEQ, itemLTE, itemLSS, itemGTE, itemGTR:
		return 3
	case itemADD, itemSUB:
		return 4
	case itemMUL, itemDIV, itemMOD:
		return 5
	default:
		return LowestPrec
	}
}
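
// For example, in the expression `a + b * c`, `*` has precedence 5 and `+` has
// precedence 4, so a precedence-aware parser binds `b * c` first; comparison
// operators such as `==` (3), the set operators `and`/`unless` (2), and `or` (1)
// bind progressively more loosely.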

type itemType int

const (
	itemError itemType = iota // Error occurred, value is error message
	itemEOF
	itemComment
	itemIdentifier
	itemMetricIdentifier
	itemLeftParen
	itemRightParen
	itemLeftBrace
	itemRightBrace
	itemLeftBracket
	itemRightBracket
	itemComma
	itemAssign
	itemSemicolon
	itemString
	itemNumber
	itemDuration
	itemBlank
	itemTimes

	operatorsStart
	// Operators.
	itemSUB
	itemADD
	itemMUL
	itemMOD
	itemDIV
	itemLAND
	itemLOR
	itemLUnless
	itemEQL
	itemNEQ
	itemLTE
	itemLSS
	itemGTE
	itemGTR
	itemEQLRegex
	itemNEQRegex
	operatorsEnd

	aggregatorsStart
	// Aggregators.
	itemAvg
	itemCount
	itemSum
	itemMin
	itemMax
	itemStddev
	itemStdvar
	aggregatorsEnd

	keywordsStart
	// Keywords.
	itemAlert
	itemIf
	itemFor
	itemLabels
	itemAnnotations
	itemKeepCommon
	itemOffset
	itemBy
	itemWithout
	itemOn
	itemIgnoring
	itemGroupLeft
	itemGroupRight
	itemBool
	// Removed keywords. Just here to detect and print errors.
	itemSummary
	itemDescription
	itemRunbook
	keywordsEnd
)
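
// The operatorsStart/operatorsEnd, aggregatorsStart/aggregatorsEnd, and
// keywordsStart/keywordsEnd markers above are never emitted as tokens; they
// only delimit ranges of item types so that checks such as isOperator,
// isAggregator, and isKeyword can be implemented as plain numeric comparisons.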

var key = map[string]itemType{
	// Operators.
	"and":    itemLAND,
	"or":     itemLOR,
	"unless": itemLUnless,

	// Aggregators.
	"sum":    itemSum,
	"avg":    itemAvg,
	"count":  itemCount,
	"min":    itemMin,
	"max":    itemMax,
	"stddev": itemStddev,
	"stdvar": itemStdvar,

	// Keywords.
	"alert":         itemAlert,
	"if":            itemIf,
	"for":           itemFor,
	"labels":        itemLabels,
	"annotations":   itemAnnotations,
	"offset":        itemOffset,
	"by":            itemBy,
	"without":       itemWithout,
	"keeping_extra": itemKeepCommon,
	"keep_common":   itemKeepCommon,
	"on":            itemOn,
	"ignoring":      itemIgnoring,
	"group_left":    itemGroupLeft,
	"group_right":   itemGroupRight,
	"bool":          itemBool,

	// Removed keywords. Just here to detect and print errors.
	"summary":     itemSummary,
	"description": itemDescription,
	"runbook":     itemRunbook,
}

// These are the default string representations for common items. It does not
// imply that those are the only character sequences that can be lexed to such
// an item.
var itemTypeStr = map[itemType]string{
	itemLeftParen:    "(",
	itemRightParen:   ")",
	itemLeftBrace:    "{",
	itemRightBrace:   "}",
	itemLeftBracket:  "[",
	itemRightBracket: "]",
	itemComma:        ",",
	itemAssign:       "=",
	itemSemicolon:    ";",
	itemBlank:        "_",
	itemTimes:        "x",

	itemSUB:      "-",
	itemADD:      "+",
	itemMUL:      "*",
	itemMOD:      "%",
	itemDIV:      "/",
	itemEQL:      "==",
	itemNEQ:      "!=",
	itemLTE:      "<=",
	itemLSS:      "<",
	itemGTE:      ">=",
	itemGTR:      ">",
	itemEQLRegex: "=~",
	itemNEQRegex: "!~",
}

func init() {
	// Add keywords to item type strings.
	for s, ty := range key {
		itemTypeStr[ty] = s
	}
	// Special numbers.
	key["inf"] = itemNumber
	key["nan"] = itemNumber
}

func (i itemType) String() string {
	if s, ok := itemTypeStr[i]; ok {
		return s
	}
	return fmt.Sprintf("<item %d>", i)
}

func (i item) desc() string {
	if _, ok := itemTypeStr[i.typ]; ok {
		return i.String()
	}
	if i.typ == itemEOF {
		return i.typ.desc()
	}
	return fmt.Sprintf("%s %s", i.typ.desc(), i)
}

func (i itemType) desc() string {
	switch i {
	case itemError:
		return "error"
	case itemEOF:
		return "end of input"
	case itemComment:
		return "comment"
	case itemIdentifier:
		return "identifier"
	case itemMetricIdentifier:
		return "metric identifier"
	case itemString:
		return "string"
	case itemNumber:
		return "number"
	case itemDuration:
		return "duration"
	}
	return fmt.Sprintf("%q", i)
}

const eof = -1

// stateFn represents the state of the scanner as a function that returns the next state.
type stateFn func(*lexer) stateFn

// Pos is the position in a string.
type Pos int

// lexer holds the state of the scanner.
type lexer struct {
	input       string    // The string being scanned.
	state       stateFn   // The next lexing function to enter.
	pos         Pos       // Current position in the input.
	start       Pos       // Start position of this item.
	width       Pos       // Width of last rune read from input.
	lastPos     Pos       // Position of most recent item returned by nextItem.
	items       chan item // Channel of scanned items.
	parenDepth  int       // Nesting depth of ( ) exprs.
	braceOpen   bool      // Whether a { is opened.
	bracketOpen bool      // Whether a [ is opened.
	stringOpen  rune      // Quote rune of the string currently being read.

	// seriesDesc is set when a series description for the testing
	// language is lexed.
	seriesDesc bool
}

// next returns the next rune in the input.
func (l *lexer) next() rune {
	if int(l.pos) >= len(l.input) {
		l.width = 0
		return eof
	}
	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
	l.width = Pos(w)
	l.pos += l.width
	return r
}

// peek returns but does not consume the next rune in the input.
func (l *lexer) peek() rune {
	r := l.next()
	l.backup()
	return r
}

// backup steps back one rune. Can only be called once per call of next.
func (l *lexer) backup() {
	l.pos -= l.width
}

// emit passes an item back to the client.
func (l *lexer) emit(t itemType) {
	l.items <- item{t, l.start, l.input[l.start:l.pos]}
	l.start = l.pos
}

// ignore skips over the pending input before this point.
func (l *lexer) ignore() {
	l.start = l.pos
}

// accept consumes the next rune if it's from the valid set.
func (l *lexer) accept(valid string) bool {
	if strings.ContainsRune(valid, l.next()) {
		return true
	}
	l.backup()
	return false
}

// acceptRun consumes a run of runes from the valid set.
func (l *lexer) acceptRun(valid string) {
	for strings.ContainsRune(valid, l.next()) {
		// consume
	}
	l.backup()
}

// lineNumber reports which line we're on, based on the position of
// the previous item returned by nextItem. Doing it this way
// means we don't have to worry about peek double counting.
func (l *lexer) lineNumber() int {
	return 1 + strings.Count(l.input[:l.lastPos], "\n")
}

// linePosition reports which character of the current line
// we are on.
func (l *lexer) linePosition() int {
	lb := strings.LastIndex(l.input[:l.lastPos], "\n")
	if lb == -1 {
		return 1 + int(l.lastPos)
	}
	return 1 + int(l.lastPos) - lb
}

// errorf returns an error token and terminates the scan by passing
// back a nil pointer that will be the next state, terminating l.nextItem.
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
	l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
	return nil
}

// nextItem returns the next item from the input.
func (l *lexer) nextItem() item {
	item := <-l.items
	l.lastPos = item.pos

	// TODO(fabxc): remove for version 1.0.
	t := item.typ
	if t == itemSummary || t == itemDescription || t == itemRunbook {
		log.Errorf("Token %q is not valid anymore. Alerting rule syntax has changed with version 0.17.0. Please read https://prometheus.io/docs/alerting/rules/.", item)
	}
	return item
}

// lex creates a new scanner for the input string.
func lex(input string) *lexer {
	l := &lexer{
		input: input,
		items: make(chan item),
	}
	go l.run()
	return l
}

// run runs the state machine for the lexer.
func (l *lexer) run() {
	for l.state = lexStatements; l.state != nil; {
		l.state = l.state(l)
	}
	close(l.items)
}
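
// A minimal usage sketch (illustrative only): the lexer runs in its own
// goroutine, and the caller drains items until an EOF or error item appears.
//
//	l := lex(`foo{bar="baz"}[5m]`)
//	for {
//		it := l.nextItem()
//		if it.typ == itemEOF || it.typ == itemError {
//			break
//		}
//		fmt.Println(it.typ.desc(), it)
//	}
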
// lineComment is the character that starts a line comment.
const lineComment = "#"

// lexStatements is the top-level state for lexing.
func lexStatements(l *lexer) stateFn {
	if l.braceOpen {
		return lexInsideBraces
	}
	if strings.HasPrefix(l.input[l.pos:], lineComment) {
		return lexLineComment
	}

	switch r := l.next(); {
	case r == eof:
		if l.parenDepth != 0 {
			return l.errorf("unclosed left parenthesis")
		} else if l.bracketOpen {
			return l.errorf("unclosed left bracket")
		}
		l.emit(itemEOF)
		return nil
	case r == ',':
		l.emit(itemComma)
	case isSpace(r):
		return lexSpace
	case r == '*':
		l.emit(itemMUL)
	case r == '/':
		l.emit(itemDIV)
	case r == '%':
		l.emit(itemMOD)
	case r == '+':
		l.emit(itemADD)
	case r == '-':
		l.emit(itemSUB)
	case r == '=':
		if t := l.peek(); t == '=' {
			l.next()
			l.emit(itemEQL)
		} else if t == '~' {
			return l.errorf("unexpected character after '=': %q", t)
		} else {
			l.emit(itemAssign)
		}
	case r == '!':
		if t := l.next(); t == '=' {
			l.emit(itemNEQ)
		} else {
			return l.errorf("unexpected character after '!': %q", t)
		}
	case r == '<':
		if t := l.peek(); t == '=' {
			l.next()
			l.emit(itemLTE)
		} else {
			l.emit(itemLSS)
		}
	case r == '>':
		if t := l.peek(); t == '=' {
			l.next()
			l.emit(itemGTE)
		} else {
			l.emit(itemGTR)
		}
	case isDigit(r) || (r == '.' && isDigit(l.peek())):
		l.backup()
		return lexNumberOrDuration
	case r == '"' || r == '\'':
		l.stringOpen = r
		return lexString
	case r == '`':
		l.stringOpen = r
		return lexRawString
	case isAlpha(r) || r == ':':
		l.backup()
		return lexKeywordOrIdentifier
	case r == '(':
		l.emit(itemLeftParen)
		l.parenDepth++
		return lexStatements
	case r == ')':
		l.emit(itemRightParen)
		l.parenDepth--
		if l.parenDepth < 0 {
			return l.errorf("unexpected right parenthesis %q", r)
		}
		return lexStatements
	case r == '{':
		l.emit(itemLeftBrace)
		l.braceOpen = true
		return lexInsideBraces(l)
	case r == '[':
		if l.bracketOpen {
			return l.errorf("unexpected left bracket %q", r)
		}
		l.emit(itemLeftBracket)
		l.bracketOpen = true
		return lexDuration
	case r == ']':
		if !l.bracketOpen {
			return l.errorf("unexpected right bracket %q", r)
		}
		l.emit(itemRightBracket)
		l.bracketOpen = false
	default:
		return l.errorf("unexpected character: %q", r)
	}
	return lexStatements
}

// lexInsideBraces scans the inside of a vector selector. Keywords are ignored and
// scanned as identifiers.
func lexInsideBraces(l *lexer) stateFn {
	if strings.HasPrefix(l.input[l.pos:], lineComment) {
		return lexLineComment
	}

	switch r := l.next(); {
	case r == eof:
		return l.errorf("unexpected end of input inside braces")
	case isSpace(r):
		return lexSpace
	case isAlpha(r):
		l.backup()
		return lexIdentifier
	case r == ',':
		l.emit(itemComma)
	case r == '"' || r == '\'':
		l.stringOpen = r
		return lexString
	case r == '`':
		l.stringOpen = r
		return lexRawString
	case r == '=':
		if l.next() == '~' {
			l.emit(itemEQLRegex)
			break
		}
		l.backup()
		l.emit(itemEQL)
	case r == '!':
		switch nr := l.next(); {
		case nr == '~':
			l.emit(itemNEQRegex)
		case nr == '=':
			l.emit(itemNEQ)
		default:
			return l.errorf("unexpected character after '!' inside braces: %q", nr)
		}
	case r == '{':
		return l.errorf("unexpected left brace %q", r)
	case r == '}':
		l.emit(itemRightBrace)
		l.braceOpen = false

		if l.seriesDesc {
			return lexValueSequence
		}
		return lexStatements
	default:
		return l.errorf("unexpected character inside braces: %q", r)
	}
	return lexInsideBraces
}

// lexValueSequence scans a value sequence of a series description.
func lexValueSequence(l *lexer) stateFn {
	switch r := l.next(); {
	case r == eof:
		return lexStatements
	case isSpace(r):
		lexSpace(l)
	case r == '+':
		l.emit(itemADD)
	case r == '-':
		l.emit(itemSUB)
	case r == 'x':
		l.emit(itemTimes)
	case r == '_':
		l.emit(itemBlank)
	case isDigit(r) || (r == '.' && isDigit(l.peek())):
		l.backup()
		lexNumber(l)
	case isAlpha(r):
		l.backup()
		// We might lex invalid items here but this will be caught by the parser.
		return lexKeywordOrIdentifier
	default:
		return l.errorf("unexpected character in series sequence: %q", r)
	}
	return lexValueSequence
}

// lexEscape scans a string escape sequence. The initial escaping character (\)
// has already been seen.
//
// NOTE: This function as well as the helper function digitVal() and associated
// tests have been adapted from the corresponding functions in the "go/scanner"
// package of the Go standard library to work for Prometheus-style strings.
// None of the actual escaping/quoting logic was changed in this function - it
// was only modified to integrate with our lexer.
func lexEscape(l *lexer) {
	var n int
	var base, max uint32

	ch := l.next()
	switch ch {
	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', l.stringOpen:
		return
	case '0', '1', '2', '3', '4', '5', '6', '7':
		n, base, max = 3, 8, 255
	case 'x':
		ch = l.next()
		n, base, max = 2, 16, 255
	case 'u':
		ch = l.next()
		n, base, max = 4, 16, unicode.MaxRune
	case 'U':
		ch = l.next()
		n, base, max = 8, 16, unicode.MaxRune
	case eof:
		l.errorf("escape sequence not terminated")
	default:
		l.errorf("unknown escape sequence %#U", ch)
	}

	var x uint32
	for n > 0 {
		d := uint32(digitVal(ch))
		if d >= base {
			if ch == eof {
				l.errorf("escape sequence not terminated")
			}
			l.errorf("illegal character %#U in escape sequence", ch)
		}
		x = x*base + d
		ch = l.next()
		n--
	}

	if x > max || 0xD800 <= x && x < 0xE000 {
		l.errorf("escape sequence is an invalid Unicode code point")
	}
}

// digitVal returns the digit value of a rune or 16 in case the rune does not
// represent a valid digit.
func digitVal(ch rune) int {
	switch {
	case '0' <= ch && ch <= '9':
		return int(ch - '0')
	case 'a' <= ch && ch <= 'f':
		return int(ch - 'a' + 10)
	case 'A' <= ch && ch <= 'F':
		return int(ch - 'A' + 10)
	}
	return 16 // Larger than any legal digit val.
}
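
// For illustration: for an escape such as \x41, lexEscape consumes the 'x' and
// then reads n=2 hex digits via digitVal, accumulating x=0x41, which is within
// max=255 and therefore accepted; a surrogate such as \uD800 is rejected as an
// invalid Unicode code point.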

// lexString scans a quoted string. The initial quote has already been seen.
func lexString(l *lexer) stateFn {
Loop:
	for {
		switch l.next() {
		case '\\':
			lexEscape(l)
		case eof, '\n':
			return l.errorf("unterminated quoted string")
		case l.stringOpen:
			break Loop
		}
	}
	l.emit(itemString)
	return lexStatements
}

// lexRawString scans a raw quoted string. The initial quote has already been seen.
func lexRawString(l *lexer) stateFn {
Loop:
	for {
		switch l.next() {
		case eof:
			return l.errorf("unterminated raw string")
		case l.stringOpen:
			break Loop
		}
	}
	l.emit(itemString)
	return lexStatements
}

// lexSpace scans a run of space characters. One space has already been seen.
func lexSpace(l *lexer) stateFn {
	for isSpace(l.peek()) {
		l.next()
	}
	l.ignore()
	return lexStatements
}

// lexLineComment scans a line comment. Left comment marker is known to be present.
func lexLineComment(l *lexer) stateFn {
	l.pos += Pos(len(lineComment))
	for r := l.next(); !isEndOfLine(r) && r != eof; {
		r = l.next()
	}
	l.backup()
	l.emit(itemComment)
	return lexStatements
}

// lexDuration scans a duration item.
func lexDuration(l *lexer) stateFn {
	if l.scanNumber() {
		return l.errorf("missing unit character in duration")
	}
	// Next two chars must be a valid unit and a non-alphanumeric.
	if l.accept("smhdwy") {
		if isAlphaNumeric(l.next()) {
			return l.errorf("bad duration syntax: %q", l.input[l.start:l.pos])
		}
		l.backup()
		l.emit(itemDuration)
		return lexStatements
	}
	return l.errorf("bad duration syntax: %q", l.input[l.start:l.pos])
}

// lexNumber scans a number: decimal, hex, oct or float.
func lexNumber(l *lexer) stateFn {
	if !l.scanNumber() {
		return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
	}
	l.emit(itemNumber)
	return lexStatements
}

// lexNumberOrDuration scans a number or a duration item.
func lexNumberOrDuration(l *lexer) stateFn {
	if l.scanNumber() {
		l.emit(itemNumber)
		return lexStatements
	}
	// Next two chars must be a valid unit and a non-alphanumeric.
	if l.accept("smhdwy") {
		if isAlphaNumeric(l.next()) {
			return l.errorf("bad number or duration syntax: %q", l.input[l.start:l.pos])
		}
		l.backup()
		l.emit(itemDuration)
		return lexStatements
	}
	return l.errorf("bad number or duration syntax: %q", l.input[l.start:l.pos])
}

// scanNumber scans numbers of different formats. The scanned item is
// not necessarily a valid number. This case is caught by the parser.
func (l *lexer) scanNumber() bool {
	digits := "0123456789"
	// Disallow hexadecimal in series descriptions as the syntax is ambiguous.
	if !l.seriesDesc && l.accept("0") && l.accept("xX") {
		digits = "0123456789abcdefABCDEF"
	}
	l.acceptRun(digits)
	if l.accept(".") {
		l.acceptRun(digits)
	}
	if l.accept("eE") {
		l.accept("+-")
		l.acceptRun("0123456789")
	}
	// Next thing must not be alphanumeric unless it's the times token
	// for series repetitions.
	if r := l.peek(); (l.seriesDesc && r == 'x') || !isAlphaNumeric(r) {
		return true
	}
	return false
}
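
// For illustration, scanNumber accepts forms such as "23", "0x2A", ".5", and
// "1e-3". In series descriptions hexadecimal is disallowed, but a number may
// be directly followed by the repetition token 'x', as in "1x5".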

// lexIdentifier scans an alphanumeric identifier. The next character
// is known to be a letter.
func lexIdentifier(l *lexer) stateFn {
	for isAlphaNumeric(l.next()) {
		// absorb
	}
	l.backup()
	l.emit(itemIdentifier)
	return lexStatements
}

// lexKeywordOrIdentifier scans an alphanumeric identifier which may contain
// a colon rune. If the identifier is a keyword the respective keyword item
// is scanned.
func lexKeywordOrIdentifier(l *lexer) stateFn {
Loop:
	for {
		switch r := l.next(); {
		case isAlphaNumeric(r) || r == ':':
			// absorb.
		default:
			l.backup()
			word := l.input[l.start:l.pos]
			if kw, ok := key[strings.ToLower(word)]; ok {
				l.emit(kw)
			} else if !strings.Contains(word, ":") {
				l.emit(itemIdentifier)
			} else {
				l.emit(itemMetricIdentifier)
			}
			break Loop
		}
	}
	if l.seriesDesc && l.peek() != '{' {
		return lexValueSequence
	}
	return lexStatements
}
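
// For illustration: "sum" (in any letter case) is found in the key map and
// emitted as its keyword item, "foo_bar" becomes itemIdentifier, and a word
// containing a colon such as "job:request_rate:5m" becomes itemMetricIdentifier.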

// isSpace reports whether r is a space character.
func isSpace(r rune) bool {
	return r == ' ' || r == '\t' || r == '\n' || r == '\r'
}

// isEndOfLine reports whether r is an end-of-line character.
func isEndOfLine(r rune) bool {
	return r == '\r' || r == '\n'
}

// isAlphaNumeric reports whether r is an alphabetic character, a digit, or an underscore.
func isAlphaNumeric(r rune) bool {
	return isAlpha(r) || isDigit(r)
}

// isDigit reports whether r is a digit. Note: we cannot use unicode.IsDigit()
// instead because that also classifies non-Latin digits as digits. See
// https://github.com/prometheus/prometheus/issues/939.
func isDigit(r rune) bool {
	return '0' <= r && r <= '9'
}

// isAlpha reports whether r is an alphabetic character or an underscore.
func isAlpha(r rune) bool {
	return r == '_' || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z')
}