2015-03-30 09:12:51 -07:00
|
|
|
// Copyright 2015 The Prometheus Authors
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
package promql
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"strings"
|
2015-09-30 12:27:08 -07:00
|
|
|
"unicode"
|
2015-03-30 09:12:51 -07:00
|
|
|
"unicode/utf8"
|
|
|
|
)
|
|
|
|
|
|
|
|
// item represents a token or text string returned from the scanner.
|
|
|
|
type item struct {
|
2018-03-08 08:52:44 -08:00
|
|
|
typ ItemType // The type of this item.
|
2015-03-30 09:12:51 -07:00
|
|
|
pos Pos // The starting position, in bytes, of this item in the input string.
|
|
|
|
val string // The value of this item.
|
|
|
|
}
|
|
|
|
|
|
|
|
// String returns a descriptive string for the item.
|
|
|
|
func (i item) String() string {
|
|
|
|
switch {
|
2019-03-14 12:53:55 -07:00
|
|
|
case i.typ == ItemEOF:
|
2015-03-30 09:12:51 -07:00
|
|
|
return "EOF"
|
2019-03-14 12:53:55 -07:00
|
|
|
case i.typ == ItemError:
|
2015-03-30 09:12:51 -07:00
|
|
|
return i.val
|
2019-03-14 12:53:55 -07:00
|
|
|
case i.typ == ItemIdentifier || i.typ == ItemMetricIdentifier:
|
2015-04-29 07:35:18 -07:00
|
|
|
return fmt.Sprintf("%q", i.val)
|
2015-03-30 09:12:51 -07:00
|
|
|
case i.typ.isKeyword():
|
|
|
|
return fmt.Sprintf("<%s>", i.val)
|
|
|
|
case i.typ.isOperator():
|
|
|
|
return fmt.Sprintf("<op:%s>", i.val)
|
|
|
|
case i.typ.isAggregator():
|
|
|
|
return fmt.Sprintf("<aggr:%s>", i.val)
|
|
|
|
case len(i.val) > 10:
|
|
|
|
return fmt.Sprintf("%.10q...", i.val)
|
|
|
|
}
|
|
|
|
return fmt.Sprintf("%q", i.val)
|
|
|
|
}
|
|
|
|
|
2016-04-02 15:52:18 -07:00
|
|
|
// isOperator returns true if the item corresponds to an arithmetic or set operator.
// Returns false otherwise.
func (i ItemType) isOperator() bool { return i > operatorsStart && i < operatorsEnd }

// isAggregator returns true if the item belongs to the aggregator functions.
// Returns false otherwise.
func (i ItemType) isAggregator() bool { return i > aggregatorsStart && i < aggregatorsEnd }

// isAggregatorWithParam returns true if the item is an aggregator that takes a parameter.
// Returns false otherwise.
func (i ItemType) isAggregatorWithParam() bool {
	return i == ItemTopK || i == ItemBottomK || i == ItemCountValues || i == ItemQuantile
}

// isKeyword returns true if the item corresponds to a keyword.
// Returns false otherwise.
func (i ItemType) isKeyword() bool { return i > keywordsStart && i < keywordsEnd }

// isComparisonOperator returns true if the item corresponds to a comparison operator.
// Returns false otherwise.
func (i ItemType) isComparisonOperator() bool {
	switch i {
	case ItemEQL, ItemNEQ, ItemLTE, ItemLSS, ItemGTE, ItemGTR:
		return true
	default:
		return false
	}
}

// isSetOperator returns whether the item corresponds to a set operator.
func (i ItemType) isSetOperator() bool {
	switch i {
	case ItemLAND, ItemLOR, ItemLUnless:
		return true
	}
	return false
}

// LowestPrec is a constant for operator precedence in expressions.
const LowestPrec = 0 // Non-operators.
|
|
|
|
|
|
|
|
// Precedence returns the operator precedence of the binary
// operator op. If op is not a binary operator, the result
// is LowestPrec.
func (i ItemType) precedence() int {
	switch i {
	case ItemLOR:
		return 1
	case ItemLAND, ItemLUnless:
		return 2
	case ItemEQL, ItemNEQ, ItemLTE, ItemLSS, ItemGTE, ItemGTR:
		return 3
	case ItemADD, ItemSUB:
		return 4
	case ItemMUL, ItemDIV, ItemMOD:
		return 5
	case ItemPOW:
		return 6
	default:
		return LowestPrec
	}
}

// isRightAssociative returns whether the operator associates to the right.
// Exponentiation is the only right-associative operator here.
func (i ItemType) isRightAssociative() bool {
	switch i {
	case ItemPOW:
		return true
	default:
		return false
	}
}
|
|
|
|
|
2018-03-08 08:52:44 -08:00
|
|
|
// ItemType identifies the type of a lexed item.
type ItemType int

const (
	ItemError ItemType = iota // Error occurred, value is error message
	ItemEOF
	ItemComment
	ItemIdentifier
	ItemMetricIdentifier
	ItemLeftParen
	ItemRightParen
	ItemLeftBrace
	ItemRightBrace
	ItemLeftBracket
	ItemRightBracket
	ItemComma
	ItemAssign
	ItemColon
	ItemSemicolon
	ItemString
	ItemNumber
	ItemDuration
	ItemBlank
	ItemTimes
	ItemSpace

	// The unexported xxxStart/xxxEnd constants delimit category ranges so
	// that checks like isOperator reduce to simple comparisons. Do not
	// reorder constants within a range.
	operatorsStart
	// Operators.
	ItemSUB
	ItemADD
	ItemMUL
	ItemMOD
	ItemDIV
	ItemLAND
	ItemLOR
	ItemLUnless
	ItemEQL
	ItemNEQ
	ItemLTE
	ItemLSS
	ItemGTE
	ItemGTR
	ItemEQLRegex
	ItemNEQRegex
	ItemPOW
	operatorsEnd

	aggregatorsStart
	// Aggregators.
	ItemAvg
	ItemCount
	ItemSum
	ItemMin
	ItemMax
	ItemStddev
	ItemStdvar
	ItemTopK
	ItemBottomK
	ItemCountValues
	ItemQuantile
	aggregatorsEnd

	keywordsStart
	// Keywords.
	ItemOffset
	ItemBy
	ItemWithout
	ItemOn
	ItemIgnoring
	ItemGroupLeft
	ItemGroupRight
	ItemBool
	keywordsEnd
)
|
|
|
|
|
2018-03-08 08:52:44 -08:00
|
|
|
// key maps lowercased keyword strings to their item types. It is also
// extended with the special numbers "inf" and "nan" in init().
var key = map[string]ItemType{
	// Operators.
	"and":    ItemLAND,
	"or":     ItemLOR,
	"unless": ItemLUnless,

	// Aggregators.
	"sum":          ItemSum,
	"avg":          ItemAvg,
	"count":        ItemCount,
	"min":          ItemMin,
	"max":          ItemMax,
	"stddev":       ItemStddev,
	"stdvar":       ItemStdvar,
	"topk":         ItemTopK,
	"bottomk":      ItemBottomK,
	"count_values": ItemCountValues,
	"quantile":     ItemQuantile,

	// Keywords.
	"offset":      ItemOffset,
	"by":          ItemBy,
	"without":     ItemWithout,
	"on":          ItemOn,
	"ignoring":    ItemIgnoring,
	"group_left":  ItemGroupLeft,
	"group_right": ItemGroupRight,
	"bool":        ItemBool,
}
|
|
|
|
|
|
|
|
// These are the default string representations for common items. It does not
// imply that those are the only character sequences that can be lexed to such an item.
var itemTypeStr = map[ItemType]string{
	ItemLeftParen:    "(",
	ItemRightParen:   ")",
	ItemLeftBrace:    "{",
	ItemRightBrace:   "}",
	ItemLeftBracket:  "[",
	ItemRightBracket: "]",
	ItemComma:        ",",
	ItemAssign:       "=",
	ItemColon:        ":",
	ItemSemicolon:    ";",
	ItemBlank:        "_",
	ItemTimes:        "x",
	ItemSpace:        "<space>",

	ItemSUB:      "-",
	ItemADD:      "+",
	ItemMUL:      "*",
	ItemMOD:      "%",
	ItemDIV:      "/",
	ItemEQL:      "==",
	ItemNEQ:      "!=",
	ItemLTE:      "<=",
	ItemLSS:      "<",
	ItemGTE:      ">=",
	ItemGTR:      ">",
	ItemEQLRegex: "=~",
	ItemNEQRegex: "!~",
	ItemPOW:      "^",
}

func init() {
	// Add keywords to item type strings.
	for s, ty := range key {
		itemTypeStr[ty] = s
	}
	// Special numbers.
	key["inf"] = ItemNumber
	key["nan"] = ItemNumber
}
|
|
|
|
|
2018-03-08 08:52:44 -08:00
|
|
|
// String returns the default string representation of the item type, or a
// generic "<item N>" placeholder for types without one.
func (i ItemType) String() string {
	if s, ok := itemTypeStr[i]; ok {
		return s
	}
	return fmt.Sprintf("<item %d>", i)
}

// desc returns a human-readable description of the item for error messages.
func (i item) desc() string {
	if _, ok := itemTypeStr[i.typ]; ok {
		return i.String()
	}
	if i.typ == ItemEOF {
		return i.typ.desc()
	}
	return fmt.Sprintf("%s %s", i.typ.desc(), i)
}
|
|
|
|
|
2018-03-08 08:52:44 -08:00
|
|
|
// desc returns a human-readable description of the item type for use in
// error messages; unknown types fall back to the quoted String() form.
func (i ItemType) desc() string {
	switch i {
	case ItemError:
		return "error"
	case ItemEOF:
		return "end of input"
	case ItemComment:
		return "comment"
	case ItemIdentifier:
		return "identifier"
	case ItemMetricIdentifier:
		return "metric identifier"
	case ItemString:
		return "string"
	case ItemNumber:
		return "number"
	case ItemDuration:
		return "duration"
	}
	return fmt.Sprintf("%q", i)
}
|
|
|
|
|
|
|
|
// eof is returned by next() when the input is exhausted.
const eof = -1

// stateFn represents the state of the scanner as a function that returns the next state.
type stateFn func(*lexer) stateFn

// Pos is the position in a string.
type Pos int

// lexer holds the state of the scanner.
type lexer struct {
	input   string    // The string being scanned.
	state   stateFn   // The next lexing function to enter.
	pos     Pos       // Current position in the input.
	start   Pos       // Start position of this item.
	width   Pos       // Width of last rune read from input.
	lastPos Pos       // Position of most recent item returned by nextItem.
	items   chan item // Channel of scanned items.

	parenDepth  int  // Nesting depth of ( ) exprs.
	braceOpen   bool // Whether a { is opened.
	bracketOpen bool // Whether a [ is opened.
	gotColon    bool // Whether we got a ':' after [ was opened.
	stringOpen  rune // Quote rune of the string currently being read.

	// seriesDesc is set when a series description for the testing
	// language is lexed.
	seriesDesc bool
}
|
|
|
|
|
|
|
|
// next returns the next rune in the input.
func (l *lexer) next() rune {
	if int(l.pos) >= len(l.input) {
		l.width = 0
		return eof
	}
	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
	l.width = Pos(w)
	l.pos += l.width
	return r
}

// peek returns but does not consume the next rune in the input.
func (l *lexer) peek() rune {
	r := l.next()
	l.backup()
	return r
}

// backup steps back one rune. Can only be called once per call of next.
func (l *lexer) backup() {
	l.pos -= l.width
}

// emit passes an item back to the client.
func (l *lexer) emit(t ItemType) {
	l.items <- item{t, l.start, l.input[l.start:l.pos]}
	l.start = l.pos
}

// ignore skips over the pending input before this point.
func (l *lexer) ignore() {
	l.start = l.pos
}

// accept consumes the next rune if it's from the valid set.
func (l *lexer) accept(valid string) bool {
	if strings.ContainsRune(valid, l.next()) {
		return true
	}
	// Not in the set: un-read the rune.
	l.backup()
	return false
}

// acceptRun consumes a run of runes from the valid set.
func (l *lexer) acceptRun(valid string) {
	for strings.ContainsRune(valid, l.next()) {
		// consume
	}
	l.backup()
}

// lineNumber reports which line we're on, based on the position of
// the previous item returned by nextItem. Doing it this way
// means we don't have to worry about peek double counting.
func (l *lexer) lineNumber() int {
	return 1 + strings.Count(l.input[:l.lastPos], "\n")
}
|
|
|
|
|
|
|
|
// linePosition reports at which character in the current line
// we are on.
func (l *lexer) linePosition() int {
	lb := strings.LastIndex(l.input[:l.lastPos], "\n")
	if lb == -1 {
		// Still on the first line: position is the offset itself.
		return 1 + int(l.lastPos)
	}
	return 1 + int(l.lastPos) - lb
}

// errorf returns an error token and terminates the scan by passing
// back a nil pointer that will be the next state, terminating l.nextItem.
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
	l.items <- item{ItemError, l.start, fmt.Sprintf(format, args...)}
	return nil
}

// nextItem returns the next item from the input.
func (l *lexer) nextItem() item {
	item := <-l.items
	l.lastPos = item.pos
	return item
}

// lex creates a new scanner for the input string.
func lex(input string) *lexer {
	l := &lexer{
		input: input,
		items: make(chan item),
	}
	// The state machine runs concurrently; items are delivered via l.items.
	go l.run()
	return l
}

// run runs the state machine for the lexer.
func (l *lexer) run() {
	for l.state = lexStatements; l.state != nil; {
		l.state = l.state(l)
	}
	close(l.items)
}

// Release resources used by lexer.
func (l *lexer) close() {
	// Drain the channel so the lexing goroutine can finish and exit.
	for range l.items {
		// Consume.
	}
}
|
|
|
|
|
2015-03-30 09:12:51 -07:00
|
|
|
// lineComment is the character that starts a line comment.
const lineComment = "#"

// lexStatements is the top-level state for lexing.
func lexStatements(l *lexer) stateFn {
	if l.braceOpen {
		return lexInsideBraces
	}
	if strings.HasPrefix(l.input[l.pos:], lineComment) {
		return lexLineComment
	}

	switch r := l.next(); {
	case r == eof:
		if l.parenDepth != 0 {
			return l.errorf("unclosed left parenthesis")
		} else if l.bracketOpen {
			return l.errorf("unclosed left bracket")
		}
		l.emit(ItemEOF)
		return nil
	case r == ',':
		l.emit(ItemComma)
	case isSpace(r):
		return lexSpace
	case r == '*':
		l.emit(ItemMUL)
	case r == '/':
		l.emit(ItemDIV)
	case r == '%':
		l.emit(ItemMOD)
	case r == '+':
		l.emit(ItemADD)
	case r == '-':
		l.emit(ItemSUB)
	case r == '^':
		l.emit(ItemPOW)
	case r == '=':
		// '=' may start '==', the invalid '=~' (only valid inside braces),
		// or a plain assignment.
		if t := l.peek(); t == '=' {
			l.next()
			l.emit(ItemEQL)
		} else if t == '~' {
			return l.errorf("unexpected character after '=': %q", t)
		} else {
			l.emit(ItemAssign)
		}
	case r == '!':
		if t := l.next(); t == '=' {
			l.emit(ItemNEQ)
		} else {
			return l.errorf("unexpected character after '!': %q", t)
		}
	case r == '<':
		if t := l.peek(); t == '=' {
			l.next()
			l.emit(ItemLTE)
		} else {
			l.emit(ItemLSS)
		}
	case r == '>':
		if t := l.peek(); t == '=' {
			l.next()
			l.emit(ItemGTE)
		} else {
			l.emit(ItemGTR)
		}
	case isDigit(r) || (r == '.' && isDigit(l.peek())):
		l.backup()
		return lexNumberOrDuration
	case r == '"' || r == '\'':
		l.stringOpen = r
		return lexString
	case r == '`':
		l.stringOpen = r
		return lexRawString
	case isAlpha(r) || r == ':':
		if !l.bracketOpen {
			l.backup()
			return lexKeywordOrIdentifier
		}
		// Inside brackets, at most one ':' is allowed.
		if l.gotColon {
			return l.errorf("unexpected colon %q", r)
		}
		l.emit(ItemColon)
		l.gotColon = true
	case r == '(':
		l.emit(ItemLeftParen)
		l.parenDepth++
		return lexStatements
	case r == ')':
		l.emit(ItemRightParen)
		l.parenDepth--
		if l.parenDepth < 0 {
			return l.errorf("unexpected right parenthesis %q", r)
		}
		return lexStatements
	case r == '{':
		l.emit(ItemLeftBrace)
		l.braceOpen = true
		return lexInsideBraces(l)
	case r == '[':
		if l.bracketOpen {
			return l.errorf("unexpected left bracket %q", r)
		}
		// Reset the colon tracker for the new bracket expression.
		l.gotColon = false
		l.emit(ItemLeftBracket)
		l.bracketOpen = true
		return lexDuration
	case r == ']':
		if !l.bracketOpen {
			return l.errorf("unexpected right bracket %q", r)
		}
		l.emit(ItemRightBracket)
		l.bracketOpen = false

	default:
		return l.errorf("unexpected character: %q", r)
	}
	return lexStatements
}
|
|
|
|
|
|
|
|
// lexInsideBraces scans the inside of a vector selector. Keywords are ignored and
// scanned as identifiers.
func lexInsideBraces(l *lexer) stateFn {
	if strings.HasPrefix(l.input[l.pos:], lineComment) {
		return lexLineComment
	}

	switch r := l.next(); {
	case r == eof:
		return l.errorf("unexpected end of input inside braces")
	case isSpace(r):
		return lexSpace
	case isAlpha(r):
		l.backup()
		return lexIdentifier
	case r == ',':
		l.emit(ItemComma)
	case r == '"' || r == '\'':
		l.stringOpen = r
		return lexString
	case r == '`':
		l.stringOpen = r
		return lexRawString
	case r == '=':
		// '=~' is a regex match, a bare '=' an equality matcher.
		if l.next() == '~' {
			l.emit(ItemEQLRegex)
			break
		}
		l.backup()
		l.emit(ItemEQL)
	case r == '!':
		switch nr := l.next(); {
		case nr == '~':
			l.emit(ItemNEQRegex)
		case nr == '=':
			l.emit(ItemNEQ)
		default:
			return l.errorf("unexpected character after '!' inside braces: %q", nr)
		}
	case r == '{':
		return l.errorf("unexpected left brace %q", r)
	case r == '}':
		l.emit(ItemRightBrace)
		l.braceOpen = false

		// In series descriptions, the selector is followed by values.
		if l.seriesDesc {
			return lexValueSequence
		}
		return lexStatements
	default:
		return l.errorf("unexpected character inside braces: %q", r)
	}
	return lexInsideBraces
}
|
|
|
|
|
2015-05-12 01:39:10 -07:00
|
|
|
// lexValueSequence scans a value sequence of a series description.
func lexValueSequence(l *lexer) stateFn {
	switch r := l.next(); {
	case r == eof:
		return lexStatements
	case isSpace(r):
		l.emit(ItemSpace)
		lexSpace(l)
	case r == '+':
		l.emit(ItemADD)
	case r == '-':
		l.emit(ItemSUB)
	case r == 'x':
		// 'x' is the repetition marker in series descriptions.
		l.emit(ItemTimes)
	case r == '_':
		// '_' denotes a blank (missing) sample.
		l.emit(ItemBlank)
	case isDigit(r) || (r == '.' && isDigit(l.peek())):
		l.backup()
		lexNumber(l)
	case isAlpha(r):
		l.backup()
		// We might lex invalid items here but this will be caught by the parser.
		return lexKeywordOrIdentifier
	default:
		return l.errorf("unexpected character in series sequence: %q", r)
	}
	return lexValueSequence
}
|
|
|
|
|
2015-09-30 12:27:08 -07:00
|
|
|
// lexEscape scans a string escape sequence. The initial escaping character (\)
// has already been seen.
//
// NOTE: This function as well as the helper function digitVal() and associated
// tests have been adapted from the corresponding functions in the "go/scanner"
// package of the Go standard library to work for Prometheus-style strings.
// None of the actual escaping/quoting logic was changed in this function - it
// was only modified to integrate with our lexer.
func lexEscape(l *lexer) {
	var n int
	var base, max uint32

	ch := l.next()
	switch ch {
	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', l.stringOpen:
		// Single-character escapes need no further validation.
		return
	case '0', '1', '2', '3', '4', '5', '6', '7':
		// Octal escape: up to 3 digits, max value 255.
		n, base, max = 3, 8, 255
	case 'x':
		ch = l.next()
		n, base, max = 2, 16, 255
	case 'u':
		ch = l.next()
		n, base, max = 4, 16, unicode.MaxRune
	case 'U':
		ch = l.next()
		n, base, max = 8, 16, unicode.MaxRune
	case eof:
		l.errorf("escape sequence not terminated")
	default:
		l.errorf("unknown escape sequence %#U", ch)
	}

	// Accumulate the numeric value of the escape, digit by digit.
	var x uint32
	for n > 0 {
		d := uint32(digitVal(ch))
		if d >= base {
			if ch == eof {
				l.errorf("escape sequence not terminated")
			}
			l.errorf("illegal character %#U in escape sequence", ch)
		}
		x = x*base + d
		ch = l.next()
		n--
	}

	// Reject values beyond the range limit and the surrogate half range.
	if x > max || 0xD800 <= x && x < 0xE000 {
		l.errorf("escape sequence is an invalid Unicode code point")
	}
}
|
|
|
|
|
|
|
|
// digitVal returns the digit value of a rune or 16 in case the rune does not
// represent a valid digit.
func digitVal(ch rune) int {
	if ch >= '0' && ch <= '9' {
		return int(ch - '0')
	}
	if ch >= 'a' && ch <= 'f' {
		return int(ch-'a') + 10
	}
	if ch >= 'A' && ch <= 'F' {
		return int(ch-'A') + 10
	}
	return 16 // Larger than any legal digit val.
}
|
|
|
|
|
2015-03-30 09:12:51 -07:00
|
|
|
// lexString scans a quoted string. The initial quote has already been seen.
func lexString(l *lexer) stateFn {
Loop:
	for {
		switch l.next() {
		case '\\':
			lexEscape(l)
		case utf8.RuneError:
			return l.errorf("invalid UTF-8 rune")
		case eof, '\n':
			return l.errorf("unterminated quoted string")
		case l.stringOpen:
			break Loop
		}
	}
	l.emit(ItemString)
	return lexStatements
}

// lexRawString scans a raw quoted string. The initial quote has already been seen.
func lexRawString(l *lexer) stateFn {
Loop:
	for {
		switch l.next() {
		case utf8.RuneError:
			return l.errorf("invalid UTF-8 rune")
		case eof:
			return l.errorf("unterminated raw string")
		case l.stringOpen:
			break Loop
		}
	}
	l.emit(ItemString)
	return lexStatements
}

// lexSpace scans a run of space characters. One space has already been seen.
func lexSpace(l *lexer) stateFn {
	for isSpace(l.peek()) {
		l.next()
	}
	// Whitespace produces no item in this state.
	l.ignore()
	return lexStatements
}

// lexLineComment scans a line comment. Left comment marker is known to be present.
func lexLineComment(l *lexer) stateFn {
	l.pos += Pos(len(lineComment))
	for r := l.next(); !isEndOfLine(r) && r != eof; {
		r = l.next()
	}
	// Un-read the line terminator (or eof) so it is lexed separately.
	l.backup()
	l.emit(ItemComment)
	return lexStatements
}
|
|
|
|
|
|
|
|
// lexDuration scans a duration such as "5m". A duration consists of a number
// immediately followed by a single unit character.
func lexDuration(l *lexer) stateFn {
	// scanNumber returns true when the number is already complete, i.e.
	// no unit character follows it.
	if l.scanNumber() {
		return l.errorf("missing unit character in duration")
	}
	// Next two chars must be a valid unit and a non-alphanumeric.
	if l.accept("smhdwy") {
		if isAlphaNumeric(l.next()) {
			return l.errorf("bad duration syntax: %q", l.input[l.start:l.pos])
		}
		l.backup()
		l.emit(ItemDuration)
		return lexStatements
	}
	return l.errorf("bad duration syntax: %q", l.input[l.start:l.pos])
}

// lexNumber scans a number: decimal, hex, oct or float.
func lexNumber(l *lexer) stateFn {
	if !l.scanNumber() {
		return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
	}
	l.emit(ItemNumber)
	return lexStatements
}

// lexNumberOrDuration scans a number or a duration item.
func lexNumberOrDuration(l *lexer) stateFn {
	if l.scanNumber() {
		l.emit(ItemNumber)
		return lexStatements
	}
	// Next two chars must be a valid unit and a non-alphanumeric.
	if l.accept("smhdwy") {
		if isAlphaNumeric(l.next()) {
			return l.errorf("bad number or duration syntax: %q", l.input[l.start:l.pos])
		}
		l.backup()
		l.emit(ItemDuration)
		return lexStatements
	}
	return l.errorf("bad number or duration syntax: %q", l.input[l.start:l.pos])
}
|
|
|
|
|
|
|
|
// scanNumber scans numbers of different formats. The scanned item is
// not necessarily a valid number. This case is caught by the parser.
// It returns true if the number terminates cleanly (i.e. the following
// rune is not alphanumeric) and false otherwise.
func (l *lexer) scanNumber() bool {
	digits := "0123456789"
	// Disallow hexadecimal in series descriptions as the syntax is ambiguous.
	if !l.seriesDesc && l.accept("0") && l.accept("xX") {
		digits = "0123456789abcdefABCDEF"
	}
	l.acceptRun(digits)
	if l.accept(".") {
		l.acceptRun(digits)
	}
	if l.accept("eE") {
		l.accept("+-")
		// Exponents are always decimal, even for hex mantissas.
		l.acceptRun("0123456789")
	}
	// Next thing must not be alphanumeric unless it's the times token
	// for series repetitions.
	if r := l.peek(); (l.seriesDesc && r == 'x') || !isAlphaNumeric(r) {
		return true
	}
	return false
}
|
|
|
|
|
2015-04-29 07:35:18 -07:00
|
|
|
// lexIdentifier scans an alphanumeric identifier. The next character
// is known to be a letter.
func lexIdentifier(l *lexer) stateFn {
	for isAlphaNumeric(l.next()) {
		// absorb
	}
	l.backup()
	l.emit(ItemIdentifier)
	return lexStatements
}

// lexKeywordOrIdentifier scans an alphanumeric identifier which may contain
// a colon rune. If the identifier is a keyword the respective keyword item
// is scanned.
func lexKeywordOrIdentifier(l *lexer) stateFn {
Loop:
	for {
		switch r := l.next(); {
		case isAlphaNumeric(r) || r == ':':
			// absorb.
		default:
			l.backup()
			word := l.input[l.start:l.pos]
			// Keywords are matched case-insensitively; a colon marks a
			// metric identifier (e.g. recording rule names).
			if kw, ok := key[strings.ToLower(word)]; ok {
				l.emit(kw)
			} else if !strings.Contains(word, ":") {
				l.emit(ItemIdentifier)
			} else {
				l.emit(ItemMetricIdentifier)
			}
			break Loop
		}
	}
	// In series descriptions, a bare identifier is followed by values
	// unless a selector body opens next.
	if l.seriesDesc && l.peek() != '{' {
		return lexValueSequence
	}
	return lexStatements
}
|
|
|
|
|
|
|
|
// isSpace reports whether r is a whitespace character recognized by the lexer.
func isSpace(r rune) bool {
	switch r {
	case ' ', '\t', '\n', '\r':
		return true
	}
	return false
}

// isEndOfLine reports whether r is an end-of-line character.
func isEndOfLine(r rune) bool {
	switch r {
	case '\n', '\r':
		return true
	}
	return false
}

// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
func isAlphaNumeric(r rune) bool {
	return isAlpha(r) || isDigit(r)
}

// isDigit reports whether r is a digit. Note: we cannot use unicode.IsDigit()
// instead because that also classifies non-Latin digits as digits. See
// https://github.com/prometheus/prometheus/issues/939.
func isDigit(r rune) bool {
	return r >= '0' && r <= '9'
}

// isAlpha reports whether r is an alphabetic or underscore.
func isAlpha(r rune) bool {
	if r == '_' {
		return true
	}
	return ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z')
}

// isLabel reports whether the string can be used as label.
func isLabel(s string) bool {
	if s == "" || !isAlpha(rune(s[0])) {
		return false
	}
	for _, c := range s[1:] {
		if !isAlphaNumeric(c) {
			return false
		}
	}
	return true
}
|