2015-03-30 09:12:51 -07:00
|
|
|
// Copyright 2015 The Prometheus Authors
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2020-02-03 08:23:44 -08:00
|
|
|
package parser
|
2015-03-30 09:12:51 -07:00
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"strings"
|
2015-09-30 12:27:08 -07:00
|
|
|
"unicode"
|
2015-03-30 09:12:51 -07:00
|
|
|
"unicode/utf8"
|
|
|
|
)
|
|
|
|
|
2019-12-09 11:03:31 -08:00
|
|
|
// Item represents a token or text string returned from the scanner.
type Item struct {
	Typ ItemType // The type of this Item.
	Pos Pos      // The starting position, in bytes, of this Item in the input string.
	Val string   // The value of this Item.
}
|
|
|
|
|
2019-12-09 11:03:31 -08:00
|
|
|
// String returns a descriptive string for the Item.
|
|
|
|
func (i Item) String() string {
|
2015-03-30 09:12:51 -07:00
|
|
|
switch {
|
2019-12-09 11:03:31 -08:00
|
|
|
case i.Typ == EOF:
|
2015-03-30 09:12:51 -07:00
|
|
|
return "EOF"
|
2019-12-09 11:03:31 -08:00
|
|
|
case i.Typ == ERROR:
|
|
|
|
return i.Val
|
|
|
|
case i.Typ == IDENTIFIER || i.Typ == METRIC_IDENTIFIER:
|
|
|
|
return fmt.Sprintf("%q", i.Val)
|
2020-02-03 09:48:27 -08:00
|
|
|
case i.Typ.IsKeyword():
|
2019-12-09 11:03:31 -08:00
|
|
|
return fmt.Sprintf("<%s>", i.Val)
|
2020-02-03 09:48:27 -08:00
|
|
|
case i.Typ.IsOperator():
|
2019-12-09 11:03:31 -08:00
|
|
|
return fmt.Sprintf("<op:%s>", i.Val)
|
2020-02-03 09:48:27 -08:00
|
|
|
case i.Typ.IsAggregator():
|
2019-12-09 11:03:31 -08:00
|
|
|
return fmt.Sprintf("<aggr:%s>", i.Val)
|
|
|
|
case len(i.Val) > 10:
|
|
|
|
return fmt.Sprintf("%.10q...", i.Val)
|
|
|
|
}
|
|
|
|
return fmt.Sprintf("%q", i.Val)
|
|
|
|
}
|
|
|
|
|
2020-06-28 03:03:09 -07:00
|
|
|
// IsOperator returns true if the Item corresponds to a arithmetic or set operator.
|
2015-03-30 09:12:51 -07:00
|
|
|
// Returns false otherwise.
|
2020-02-03 09:48:27 -08:00
|
|
|
func (i ItemType) IsOperator() bool { return i > operatorsStart && i < operatorsEnd }
|
2015-03-30 09:12:51 -07:00
|
|
|
|
2020-06-28 03:03:09 -07:00
|
|
|
// IsAggregator returns true if the Item belongs to the aggregator functions.
|
2015-03-30 09:12:51 -07:00
|
|
|
// Returns false otherwise
|
2020-02-03 09:48:27 -08:00
|
|
|
func (i ItemType) IsAggregator() bool { return i > aggregatorsStart && i < aggregatorsEnd }
|
2015-03-30 09:12:51 -07:00
|
|
|
|
2020-06-28 03:03:09 -07:00
|
|
|
// IsAggregatorWithParam returns true if the Item is an aggregator that takes a parameter.
|
2016-07-04 05:10:42 -07:00
|
|
|
// Returns false otherwise
|
2020-02-03 09:48:27 -08:00
|
|
|
func (i ItemType) IsAggregatorWithParam() bool {
|
2019-11-26 05:29:42 -08:00
|
|
|
return i == TOPK || i == BOTTOMK || i == COUNT_VALUES || i == QUANTILE
|
2016-07-05 09:12:19 -07:00
|
|
|
}
|
2016-07-04 05:10:42 -07:00
|
|
|
|
2020-06-28 03:03:09 -07:00
|
|
|
// IsKeyword returns true if the Item corresponds to a keyword.
|
2015-03-30 09:12:51 -07:00
|
|
|
// Returns false otherwise.
|
2020-02-03 09:48:27 -08:00
|
|
|
func (i ItemType) IsKeyword() bool { return i > keywordsStart && i < keywordsEnd }
|
2015-03-30 09:12:51 -07:00
|
|
|
|
2020-02-03 09:48:27 -08:00
|
|
|
// IsComparisonOperator returns true if the Item corresponds to a comparison operator.
|
2015-10-10 08:19:14 -07:00
|
|
|
// Returns false otherwise.
|
2020-02-03 09:48:27 -08:00
|
|
|
func (i ItemType) IsComparisonOperator() bool {
|
2015-10-10 08:19:14 -07:00
|
|
|
switch i {
|
2019-11-26 05:29:42 -08:00
|
|
|
case EQL, NEQ, LTE, LSS, GTE, GTR:
|
2015-10-10 08:19:14 -07:00
|
|
|
return true
|
|
|
|
default:
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-06-28 03:03:09 -07:00
|
|
|
// IsSetOperator returns whether the Item corresponds to a set operator.
|
2020-02-03 09:48:27 -08:00
|
|
|
func (i ItemType) IsSetOperator() bool {
|
2016-04-02 15:52:18 -07:00
|
|
|
switch i {
|
2019-11-26 05:29:42 -08:00
|
|
|
case LAND, LOR, LUNLESS:
|
2016-04-02 15:52:18 -07:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2018-03-08 08:52:44 -08:00
|
|
|
type ItemType int
|
2015-03-30 09:12:51 -07:00
|
|
|
|
2019-12-18 06:18:52 -08:00
|
|
|
// This is a list of all keywords in PromQL.
// When changing this list, make sure to also change
// the maybe_label grammar rule in the generated parser
// to avoid misinterpretation of labels as keywords.
//
// NOTE: init() additionally registers "inf" and "nan" as NUMBER.
var key = map[string]ItemType{
	// Operators.
	"and":    LAND,
	"or":     LOR,
	"unless": LUNLESS,

	// Aggregators.
	"sum":          SUM,
	"avg":          AVG,
	"count":        COUNT,
	"min":          MIN,
	"max":          MAX,
	"group":        GROUP,
	"stddev":       STDDEV,
	"stdvar":       STDVAR,
	"topk":         TOPK,
	"bottomk":      BOTTOMK,
	"count_values": COUNT_VALUES,
	"quantile":     QUANTILE,

	// Keywords.
	"offset":      OFFSET,
	"by":          BY,
	"without":     WITHOUT,
	"on":          ON,
	"ignoring":    IGNORING,
	"group_left":  GROUP_LEFT,
	"group_right": GROUP_RIGHT,
	"bool":        BOOL,
}
|
|
|
|
|
2020-03-23 07:47:11 -07:00
|
|
|
// ItemTypeStr is the default string representation for common Items. It does not
// imply that those are the only character sequences that can be lexed to such an Item.
var ItemTypeStr = map[ItemType]string{
	LEFT_PAREN:    "(",
	RIGHT_PAREN:   ")",
	LEFT_BRACE:    "{",
	RIGHT_BRACE:   "}",
	LEFT_BRACKET:  "[",
	RIGHT_BRACKET: "]",
	COMMA:         ",",
	ASSIGN:        "=",
	COLON:         ":",
	SEMICOLON:     ";",
	BLANK:         "_",
	TIMES:         "x",
	SPACE:         "<space>",

	SUB:       "-",
	ADD:       "+",
	MUL:       "*",
	MOD:       "%",
	DIV:       "/",
	EQL:       "==",
	NEQ:       "!=",
	LTE:       "<=",
	LSS:       "<",
	GTE:       ">=",
	GTR:       ">",
	EQL_REGEX: "=~",
	NEQ_REGEX: "!~",
	POW:       "^",
}
|
|
|
|
|
|
|
|
// init back-fills ItemTypeStr with the keyword spellings and registers the
// special number literals "inf" and "nan" in the keyword table.
func init() {
	// Add keywords to Item type strings.
	for s, ty := range key {
		ItemTypeStr[ty] = s
	}
	// Special numbers.
	key["inf"] = NUMBER
	key["nan"] = NUMBER
}
|
|
|
|
|
2018-03-08 08:52:44 -08:00
|
|
|
func (i ItemType) String() string {
|
2019-12-09 11:03:31 -08:00
|
|
|
if s, ok := ItemTypeStr[i]; ok {
|
2015-03-30 09:12:51 -07:00
|
|
|
return s
|
|
|
|
}
|
2019-12-09 11:03:31 -08:00
|
|
|
return fmt.Sprintf("<Item %d>", i)
|
2015-04-29 07:35:18 -07:00
|
|
|
}
|
|
|
|
|
2019-12-09 11:03:31 -08:00
|
|
|
// desc returns a human-readable description of the Item for error messages.
func (i Item) desc() string {
	if _, ok := ItemTypeStr[i.Typ]; ok {
		return i.String()
	}
	if i.Typ == EOF {
		return i.Typ.desc()
	}
	return fmt.Sprintf("%s %s", i.Typ.desc(), i)
}
|
|
|
|
|
2018-03-08 08:52:44 -08:00
|
|
|
// desc returns a human-readable description of the ItemType for error messages.
func (i ItemType) desc() string {
	switch i {
	case ERROR:
		return "error"
	case EOF:
		return "end of input"
	case COMMENT:
		return "comment"
	case IDENTIFIER:
		return "identifier"
	case METRIC_IDENTIFIER:
		return "metric identifier"
	case STRING:
		return "string"
	case NUMBER:
		return "number"
	case DURATION:
		return "duration"
	}
	return fmt.Sprintf("%q", i)
}
|
|
|
|
|
|
|
|
const eof = -1
|
|
|
|
|
|
|
|
// stateFn represents the state of the scanner as a function that returns the next state.
type stateFn func(*Lexer) stateFn
|
2015-03-30 09:12:51 -07:00
|
|
|
|
|
|
|
// Pos is the position in a string.
// Negative numbers indicate undefined positions.
type Pos int
|
|
|
|
|
2019-12-09 11:03:31 -08:00
|
|
|
// Lexer holds the state of the scanner.
type Lexer struct {
	input       string  // The string being scanned.
	state       stateFn // The next lexing function to enter.
	pos         Pos     // Current position in the input.
	start       Pos     // Start position of this Item.
	width       Pos     // Width of last rune read from input.
	lastPos     Pos     // Position of most recent Item returned by NextItem.
	itemp       *Item   // Pointer to where the next scanned item should be placed.
	scannedItem bool    // Set to true every time an item is scanned.

	parenDepth  int  // Nesting depth of ( ) exprs.
	braceOpen   bool // Whether a { is opened.
	bracketOpen bool // Whether a [ is opened.
	gotColon    bool // Whether we got a ':' after [ was opened.
	stringOpen  rune // Quote rune of the string currently being read.

	// seriesDesc is set when a series description for the testing
	// language is lexed.
	seriesDesc bool
}
|
|
|
|
|
|
|
|
// next returns the next rune in the input.
|
2019-12-09 11:03:31 -08:00
|
|
|
func (l *Lexer) next() rune {
|
2015-03-30 09:12:51 -07:00
|
|
|
if int(l.pos) >= len(l.input) {
|
|
|
|
l.width = 0
|
|
|
|
return eof
|
|
|
|
}
|
|
|
|
r, w := utf8.DecodeRuneInString(l.input[l.pos:])
|
|
|
|
l.width = Pos(w)
|
|
|
|
l.pos += l.width
|
|
|
|
return r
|
|
|
|
}
|
|
|
|
|
|
|
|
// peek returns but does not consume the next rune in the input.
|
2019-12-09 11:03:31 -08:00
|
|
|
func (l *Lexer) peek() rune {
|
2015-03-30 09:12:51 -07:00
|
|
|
r := l.next()
|
|
|
|
l.backup()
|
|
|
|
return r
|
|
|
|
}
|
|
|
|
|
|
|
|
// backup steps back one rune. Can only be called once per call of next.
func (l *Lexer) backup() {
	l.pos -= l.width
}
|
|
|
|
|
2019-12-09 11:03:31 -08:00
|
|
|
// emit passes an Item back to the client.
|
|
|
|
func (l *Lexer) emit(t ItemType) {
|
2020-01-09 03:26:58 -08:00
|
|
|
*l.itemp = Item{t, l.start, l.input[l.start:l.pos]}
|
2015-03-30 09:12:51 -07:00
|
|
|
l.start = l.pos
|
2020-01-09 03:26:58 -08:00
|
|
|
l.scannedItem = true
|
2015-03-30 09:12:51 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// ignore skips over the pending input before this point.
func (l *Lexer) ignore() {
	l.start = l.pos
}
|
|
|
|
|
|
|
|
// accept consumes the next rune if it's from the valid set.
|
2019-12-09 11:03:31 -08:00
|
|
|
func (l *Lexer) accept(valid string) bool {
|
2016-04-01 01:35:00 -07:00
|
|
|
if strings.ContainsRune(valid, l.next()) {
|
2015-03-30 09:12:51 -07:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
l.backup()
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// acceptRun consumes a run of runes from the valid set.
|
2019-12-09 11:03:31 -08:00
|
|
|
func (l *Lexer) acceptRun(valid string) {
|
2016-04-01 01:35:00 -07:00
|
|
|
for strings.ContainsRune(valid, l.next()) {
|
2015-03-30 09:12:51 -07:00
|
|
|
// consume
|
|
|
|
}
|
|
|
|
l.backup()
|
|
|
|
}
|
|
|
|
|
|
|
|
// errorf returns an error token and terminates the scan by passing
|
2019-12-09 11:03:31 -08:00
|
|
|
// back a nil pointer that will be the next state, terminating l.NextItem.
|
|
|
|
func (l *Lexer) errorf(format string, args ...interface{}) stateFn {
|
2020-01-09 03:26:58 -08:00
|
|
|
*l.itemp = Item{ERROR, l.start, fmt.Sprintf(format, args...)}
|
|
|
|
l.scannedItem = true
|
|
|
|
|
2015-03-30 09:12:51 -07:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-01-09 03:26:58 -08:00
|
|
|
// NextItem writes the next item to the provided address.
|
|
|
|
func (l *Lexer) NextItem(itemp *Item) {
|
|
|
|
l.scannedItem = false
|
|
|
|
l.itemp = itemp
|
|
|
|
|
|
|
|
if l.state != nil {
|
|
|
|
for !l.scannedItem {
|
2019-11-21 10:43:09 -08:00
|
|
|
l.state = l.state(l)
|
|
|
|
}
|
2020-01-09 03:26:58 -08:00
|
|
|
} else {
|
|
|
|
l.emit(EOF)
|
2019-11-21 10:43:09 -08:00
|
|
|
}
|
2020-01-09 03:26:58 -08:00
|
|
|
|
|
|
|
l.lastPos = l.itemp.Pos
|
2015-03-30 09:12:51 -07:00
|
|
|
}
|
|
|
|
|
2020-01-09 03:27:26 -08:00
|
|
|
// Lex creates a new scanner for the input string.
|
2019-12-09 11:03:31 -08:00
|
|
|
func Lex(input string) *Lexer {
|
|
|
|
l := &Lexer{
|
2015-03-30 09:12:51 -07:00
|
|
|
input: input,
|
2019-11-21 10:43:09 -08:00
|
|
|
state: lexStatements,
|
2015-03-30 09:12:51 -07:00
|
|
|
}
|
|
|
|
return l
|
|
|
|
}
|
|
|
|
|
|
|
|
// lineComment is the character that starts a line comment.
const lineComment = "#"
|
|
|
|
|
|
|
|
// lexStatements is the top-level state for lexing.
func lexStatements(l *Lexer) stateFn {
	if l.braceOpen {
		// Label matchers of a vector selector are lexed in their own state.
		return lexInsideBraces
	}
	if strings.HasPrefix(l.input[l.pos:], lineComment) {
		return lexLineComment
	}

	switch r := l.next(); {
	case r == eof:
		if l.parenDepth != 0 {
			return l.errorf("unclosed left parenthesis")
		} else if l.bracketOpen {
			return l.errorf("unclosed left bracket")
		}
		l.emit(EOF)
		return nil
	case r == ',':
		l.emit(COMMA)
	case isSpace(r):
		return lexSpace
	case r == '*':
		l.emit(MUL)
	case r == '/':
		l.emit(DIV)
	case r == '%':
		l.emit(MOD)
	case r == '+':
		l.emit(ADD)
	case r == '-':
		l.emit(SUB)
	case r == '^':
		l.emit(POW)
	case r == '=':
		if t := l.peek(); t == '=' {
			l.next()
			l.emit(EQL)
		} else if t == '~' {
			// '=~' is only valid inside braces (label matchers).
			return l.errorf("unexpected character after '=': %q", t)
		} else {
			l.emit(ASSIGN)
		}
	case r == '!':
		if t := l.next(); t == '=' {
			l.emit(NEQ)
		} else {
			return l.errorf("unexpected character after '!': %q", t)
		}
	case r == '<':
		if t := l.peek(); t == '=' {
			l.next()
			l.emit(LTE)
		} else {
			l.emit(LSS)
		}
	case r == '>':
		if t := l.peek(); t == '=' {
			l.next()
			l.emit(GTE)
		} else {
			l.emit(GTR)
		}
	case isDigit(r) || (r == '.' && isDigit(l.peek())):
		l.backup()
		return lexNumberOrDuration
	case r == '"' || r == '\'':
		l.stringOpen = r
		return lexString
	case r == '`':
		l.stringOpen = r
		return lexRawString
	case isAlpha(r) || r == ':':
		if !l.bracketOpen {
			l.backup()
			return lexKeywordOrIdentifier
		}
		// Inside brackets a colon separates range and resolution (subqueries);
		// at most one colon is allowed.
		if l.gotColon {
			return l.errorf("unexpected colon %q", r)
		}
		l.emit(COLON)
		l.gotColon = true
	case r == '(':
		l.emit(LEFT_PAREN)
		l.parenDepth++
		return lexStatements
	case r == ')':
		l.emit(RIGHT_PAREN)
		l.parenDepth--
		if l.parenDepth < 0 {
			return l.errorf("unexpected right parenthesis %q", r)
		}
		return lexStatements
	case r == '{':
		l.emit(LEFT_BRACE)
		l.braceOpen = true
		return lexInsideBraces
	case r == '[':
		if l.bracketOpen {
			return l.errorf("unexpected left bracket %q", r)
		}
		l.gotColon = false
		l.emit(LEFT_BRACKET)
		if isSpace(l.peek()) {
			skipSpaces(l)
		}
		l.bracketOpen = true
		return lexDuration
	case r == ']':
		if !l.bracketOpen {
			return l.errorf("unexpected right bracket %q", r)
		}
		l.emit(RIGHT_BRACKET)
		l.bracketOpen = false

	default:
		return l.errorf("unexpected character: %q", r)
	}
	return lexStatements
}
|
|
|
|
|
|
|
|
// lexInsideBraces scans the inside of a vector selector. Keywords are ignored and
// scanned as identifiers.
func lexInsideBraces(l *Lexer) stateFn {
	if strings.HasPrefix(l.input[l.pos:], lineComment) {
		return lexLineComment
	}

	switch r := l.next(); {
	case r == eof:
		return l.errorf("unexpected end of input inside braces")
	case isSpace(r):
		return lexSpace
	case isAlpha(r):
		l.backup()
		return lexIdentifier
	case r == ',':
		l.emit(COMMA)
	case r == '"' || r == '\'':
		l.stringOpen = r
		return lexString
	case r == '`':
		l.stringOpen = r
		return lexRawString
	case r == '=':
		if l.next() == '~' {
			l.emit(EQL_REGEX)
			break
		}
		l.backup()
		l.emit(EQL)
	case r == '!':
		switch nr := l.next(); {
		case nr == '~':
			l.emit(NEQ_REGEX)
		case nr == '=':
			l.emit(NEQ)
		default:
			return l.errorf("unexpected character after '!' inside braces: %q", nr)
		}
	case r == '{':
		return l.errorf("unexpected left brace %q", r)
	case r == '}':
		l.emit(RIGHT_BRACE)
		l.braceOpen = false

		// In a series description (test language) the braces are followed
		// by a value sequence instead of further PromQL.
		if l.seriesDesc {
			return lexValueSequence
		}
		return lexStatements
	default:
		return l.errorf("unexpected character inside braces: %q", r)
	}
	return lexInsideBraces
}
|
|
|
|
|
2015-05-12 01:39:10 -07:00
|
|
|
// lexValueSequence scans a value sequence of a series description.
func lexValueSequence(l *Lexer) stateFn {
	switch r := l.next(); {
	case r == eof:
		return lexStatements
	case isSpace(r):
		l.emit(SPACE)
		lexSpace(l)
	case r == '+':
		l.emit(ADD)
	case r == '-':
		l.emit(SUB)
	case r == 'x':
		l.emit(TIMES)
	case r == '_':
		l.emit(BLANK)
	case isDigit(r) || (r == '.' && isDigit(l.peek())):
		l.backup()
		lexNumber(l)
	case isAlpha(r):
		l.backup()
		// We might lex invalid Items here but this will be caught by the parser.
		return lexKeywordOrIdentifier
	default:
		return l.errorf("unexpected character in series sequence: %q", r)
	}
	return lexValueSequence
}
|
|
|
|
|
2015-09-30 12:27:08 -07:00
|
|
|
// lexEscape scans a string escape sequence. The initial escaping character (\)
|
|
|
|
// has already been seen.
|
|
|
|
//
|
|
|
|
// NOTE: This function as well as the helper function digitVal() and associated
|
|
|
|
// tests have been adapted from the corresponding functions in the "go/scanner"
|
|
|
|
// package of the Go standard library to work for Prometheus-style strings.
|
|
|
|
// None of the actual escaping/quoting logic was changed in this function - it
|
|
|
|
// was only modified to integrate with our lexer.
|
2020-01-09 03:26:58 -08:00
|
|
|
func lexEscape(l *Lexer) stateFn {
|
2015-09-30 12:27:08 -07:00
|
|
|
var n int
|
|
|
|
var base, max uint32
|
|
|
|
|
|
|
|
ch := l.next()
|
|
|
|
switch ch {
|
|
|
|
case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', l.stringOpen:
|
2020-01-09 03:26:58 -08:00
|
|
|
return lexString
|
2015-09-30 12:27:08 -07:00
|
|
|
case '0', '1', '2', '3', '4', '5', '6', '7':
|
|
|
|
n, base, max = 3, 8, 255
|
|
|
|
case 'x':
|
|
|
|
ch = l.next()
|
|
|
|
n, base, max = 2, 16, 255
|
|
|
|
case 'u':
|
|
|
|
ch = l.next()
|
|
|
|
n, base, max = 4, 16, unicode.MaxRune
|
|
|
|
case 'U':
|
|
|
|
ch = l.next()
|
|
|
|
n, base, max = 8, 16, unicode.MaxRune
|
|
|
|
case eof:
|
|
|
|
l.errorf("escape sequence not terminated")
|
2020-01-09 03:26:58 -08:00
|
|
|
return lexString
|
2015-09-30 12:27:08 -07:00
|
|
|
default:
|
|
|
|
l.errorf("unknown escape sequence %#U", ch)
|
2020-01-09 03:26:58 -08:00
|
|
|
return lexString
|
2015-09-30 12:27:08 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
var x uint32
|
|
|
|
for n > 0 {
|
|
|
|
d := uint32(digitVal(ch))
|
|
|
|
if d >= base {
|
|
|
|
if ch == eof {
|
|
|
|
l.errorf("escape sequence not terminated")
|
2020-01-09 03:26:58 -08:00
|
|
|
return lexString
|
2015-09-30 12:27:08 -07:00
|
|
|
}
|
|
|
|
l.errorf("illegal character %#U in escape sequence", ch)
|
2020-01-09 03:26:58 -08:00
|
|
|
return lexString
|
2015-09-30 12:27:08 -07:00
|
|
|
}
|
|
|
|
x = x*base + d
|
|
|
|
ch = l.next()
|
|
|
|
n--
|
|
|
|
}
|
|
|
|
|
|
|
|
if x > max || 0xD800 <= x && x < 0xE000 {
|
|
|
|
l.errorf("escape sequence is an invalid Unicode code point")
|
|
|
|
}
|
2020-01-09 03:26:58 -08:00
|
|
|
return lexString
|
2015-09-30 12:27:08 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// digitVal returns the digit value of a rune or 16 in case the rune does not
// represent a valid digit.
func digitVal(ch rune) int {
	switch {
	case ch >= '0' && ch <= '9':
		return int(ch - '0')
	case ch >= 'a' && ch <= 'f':
		return int(ch-'a') + 10
	case ch >= 'A' && ch <= 'F':
		return int(ch-'A') + 10
	default:
		return 16 // Larger than any legal digit val.
	}
}
|
|
|
|
|
2019-11-11 00:56:24 -08:00
|
|
|
// skipSpaces skips the spaces until a non-space is encountered.
|
2019-12-09 11:03:31 -08:00
|
|
|
func skipSpaces(l *Lexer) {
|
2019-11-11 00:56:24 -08:00
|
|
|
for isSpace(l.peek()) {
|
|
|
|
l.next()
|
|
|
|
}
|
|
|
|
l.ignore()
|
|
|
|
}
|
|
|
|
|
2015-03-30 09:12:51 -07:00
|
|
|
// lexString scans a quoted string. The initial quote has already been seen.
func lexString(l *Lexer) stateFn {
Loop:
	for {
		switch l.next() {
		case '\\':
			return lexEscape
		case utf8.RuneError:
			l.errorf("invalid UTF-8 rune")
			return lexString
		case eof, '\n':
			return l.errorf("unterminated quoted string")
		case l.stringOpen:
			// Matching closing quote terminates the string.
			break Loop
		}
	}
	l.emit(STRING)
	return lexStatements
}
|
|
|
|
|
|
|
|
// lexRawString scans a raw quoted string. The initial quote has already been seen.
func lexRawString(l *Lexer) stateFn {
Loop:
	for {
		switch l.next() {
		case utf8.RuneError:
			l.errorf("invalid UTF-8 rune")
			return lexRawString
		case eof:
			l.errorf("unterminated raw string")
			return lexRawString
		case l.stringOpen:
			// Matching closing quote terminates the string.
			break Loop
		}
	}
	l.emit(STRING)
	return lexStatements
}
|
|
|
|
|
|
|
|
// lexSpace scans a run of space characters. One space has already been seen.
|
2019-12-09 11:03:31 -08:00
|
|
|
func lexSpace(l *Lexer) stateFn {
|
2015-03-30 09:12:51 -07:00
|
|
|
for isSpace(l.peek()) {
|
|
|
|
l.next()
|
|
|
|
}
|
|
|
|
l.ignore()
|
|
|
|
return lexStatements
|
|
|
|
}
|
|
|
|
|
|
|
|
// lexLineComment scans a line comment. Left comment marker is known to be present.
func lexLineComment(l *Lexer) stateFn {
	l.pos += Pos(len(lineComment))
	for r := l.next(); !isEndOfLine(r) && r != eof; {
		r = l.next()
	}
	// Leave the end-of-line/eof rune for the next state.
	l.backup()
	l.emit(COMMENT)
	return lexStatements
}
|
|
|
|
|
2019-12-09 11:03:31 -08:00
|
|
|
// lexDuration scans a duration, entered after '[' from lexStatements.
func lexDuration(l *Lexer) stateFn {
	if l.scanNumber() {
		// scanNumber returning true means it stopped at a non-alphanumeric
		// rune, i.e. no unit character follows the number.
		return l.errorf("missing unit character in duration")
	}
	if !acceptRemainingDuration(l) {
		return l.errorf("bad duration syntax: %q", l.input[l.start:l.pos])
	}
	l.backup()
	l.emit(DURATION)
	return lexStatements
}
|
|
|
|
|
|
|
|
// lexNumber scans a number: decimal, hex, oct or float.
|
2019-12-09 11:03:31 -08:00
|
|
|
func lexNumber(l *Lexer) stateFn {
|
2015-03-30 09:12:51 -07:00
|
|
|
if !l.scanNumber() {
|
|
|
|
return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
|
|
|
|
}
|
2019-11-26 05:29:42 -08:00
|
|
|
l.emit(NUMBER)
|
2015-03-30 09:12:51 -07:00
|
|
|
return lexStatements
|
|
|
|
}
|
|
|
|
|
2019-12-09 11:03:31 -08:00
|
|
|
// lexNumberOrDuration scans a number or a duration Item.
func lexNumberOrDuration(l *Lexer) stateFn {
	if l.scanNumber() {
		l.emit(NUMBER)
		return lexStatements
	}
	// Next two chars must be a valid unit and a non-alphanumeric.
	if acceptRemainingDuration(l) {
		l.backup()
		l.emit(DURATION)
		return lexStatements
	}
	return l.errorf("bad number or duration syntax: %q", l.input[l.start:l.pos])
}
|
|
|
|
|
2020-08-04 12:12:41 -07:00
|
|
|
// acceptRemainingDuration consumes the unit (and any further number/unit
// pairs) of a duration literal and reports whether the syntax was valid.
func acceptRemainingDuration(l *Lexer) bool {
	// Next two char must be a valid duration.
	if !l.accept("smhdwy") {
		return false
	}
	// Support for ms. Bad units like hs, ys will be caught when we actually
	// parse the duration.
	l.accept("s")
	// Next char can be another number then a unit.
	for l.accept("0123456789") {
		for l.accept("0123456789") {
		}
		// y is no longer in the list as it should always come first in
		// durations.
		if !l.accept("smhdw") {
			return false
		}
		// Support for ms. Bad units like hs, ys will be caught when we actually
		// parse the duration.
		l.accept("s")
	}
	return !isAlphaNumeric(l.next())
}
|
|
|
|
|
2019-12-09 11:03:31 -08:00
|
|
|
// scanNumber scans numbers of different formats. The scanned Item is
// not necessarily a valid number. This case is caught by the parser.
func (l *Lexer) scanNumber() bool {
	digits := "0123456789"
	// Disallow hexadecimal in series descriptions as the syntax is ambiguous.
	if !l.seriesDesc && l.accept("0") && l.accept("xX") {
		digits = "0123456789abcdefABCDEF"
	}
	l.acceptRun(digits)
	if l.accept(".") {
		l.acceptRun(digits)
	}
	if l.accept("eE") {
		l.accept("+-")
		l.acceptRun("0123456789")
	}
	// Next thing must not be alphanumeric unless it's the times token
	// for series repetitions.
	if r := l.peek(); (l.seriesDesc && r == 'x') || !isAlphaNumeric(r) {
		return true
	}
	return false
}
|
|
|
|
|
2015-04-29 07:35:18 -07:00
|
|
|
// lexIdentifier scans an alphanumeric identifier. The next character
|
|
|
|
// is known to be a letter.
|
2019-12-09 11:03:31 -08:00
|
|
|
func lexIdentifier(l *Lexer) stateFn {
|
2015-03-30 09:12:51 -07:00
|
|
|
for isAlphaNumeric(l.next()) {
|
|
|
|
// absorb
|
|
|
|
}
|
|
|
|
l.backup()
|
2019-11-26 05:29:42 -08:00
|
|
|
l.emit(IDENTIFIER)
|
2015-03-30 09:12:51 -07:00
|
|
|
return lexStatements
|
|
|
|
}
|
|
|
|
|
|
|
|
// lexKeywordOrIdentifier scans an alphanumeric identifier which may contain
// a colon rune. If the identifier is a keyword the respective keyword Item
// is scanned.
func lexKeywordOrIdentifier(l *Lexer) stateFn {
Loop:
	for {
		switch r := l.next(); {
		case isAlphaNumeric(r) || r == ':':
			// absorb.
		default:
			l.backup()
			word := l.input[l.start:l.pos]
			// Keyword lookup is case-insensitive.
			if kw, ok := key[strings.ToLower(word)]; ok {
				l.emit(kw)
			} else if !strings.Contains(word, ":") {
				l.emit(IDENTIFIER)
			} else {
				// A colon inside the word marks a recording-rule style name.
				l.emit(METRIC_IDENTIFIER)
			}
			break Loop
		}
	}
	if l.seriesDesc && l.peek() != '{' {
		return lexValueSequence
	}
	return lexStatements
}
|
|
|
|
|
|
|
|
// isSpace reports whether r is a space, tab, or line-break character.
func isSpace(r rune) bool {
	switch r {
	case ' ', '\t', '\n', '\r':
		return true
	}
	return false
}
|
|
|
|
|
|
|
|
// isEndOfLine reports whether r is an end-of-line character.
func isEndOfLine(r rune) bool {
	switch r {
	case '\r', '\n':
		return true
	default:
		return false
	}
}
|
|
|
|
|
|
|
|
// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
|
|
|
|
func isAlphaNumeric(r rune) bool {
|
2015-07-28 17:11:13 -07:00
|
|
|
return isAlpha(r) || isDigit(r)
|
|
|
|
}
|
|
|
|
|
|
|
|
// isDigit reports whether r is an ASCII digit. Note: we cannot use
// unicode.IsDigit() instead because that also classifies non-Latin digits
// as digits. See https://github.com/prometheus/prometheus/issues/939.
func isDigit(r rune) bool {
	return r >= '0' && r <= '9'
}
|
|
|
|
|
|
|
|
// isAlpha reports whether r is an ASCII letter or underscore.
func isAlpha(r rune) bool {
	switch {
	case r == '_':
		return true
	case 'a' <= r && r <= 'z':
		return true
	case 'A' <= r && r <= 'Z':
		return true
	default:
		return false
	}
}
|
Fix parsing of label names which are also keywords
The current separation between lexer and parser is a bit fuzzy when it
comes to operators, aggregators and other keywords. The lexer already
tries to determine the type of a token, even though that type might
change depending on the context.
This led to the problematic behavior that no tokens known to the lexer
could be used as label names, including operators (and, by, ...),
aggregators (count, quantile, ...) or other keywords (for, offset, ...).
This change additionally checks whether an identifier is one of these
types. We might want to check whether the specific item identification
should be moved from the lexer to the parser.
2016-09-07 12:16:34 -07:00
|
|
|
|
|
|
|
// isLabel reports whether the string can be used as label.
|
|
|
|
func isLabel(s string) bool {
|
|
|
|
if len(s) == 0 || !isAlpha(rune(s[0])) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
for _, c := range s[1:] {
|
|
|
|
if !isAlphaNumeric(c) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|