PromQL: export lexer (#6435)

Signed-off-by: Tobias Guggenmos <tguggenm@redhat.com>
This commit is contained in:
Tobias Guggenmos 2019-12-09 19:03:31 +00:00 committed by Brian Brazil
parent 35c1f31721
commit 5c503d85f7
5 changed files with 301 additions and 301 deletions

View file

@ -21,7 +21,7 @@
%union {
node Node
item item
item Item
matchers []*labels.Matcher
matcher *labels.Matcher
}
@ -148,8 +148,8 @@ match_op :
| EQL_REGEX {$$=$1}
| NEQ_REGEX {$$=$1}
| error
{ yylex.(*parser).errorf("expected label matching operator but got %s", yylex.(*parser).token.val) }
{ yylex.(*parser).errorf("expected label matching operator but got %s", yylex.(*parser).token.Val) }
;
%%
%%

View file

@ -15,7 +15,7 @@ import (
type yySymType struct {
yys int
node Node
item item
item Item
matchers []*labels.Matcher
matcher *labels.Matcher
}
@ -650,7 +650,7 @@ yydefault:
yyDollar = yyS[yypt-1 : yypt+1]
//line promql/generated_parser.y:151
{
yylex.(*parser).errorf("expected label matching operator but got %s", yylex.(*parser).token.val)
yylex.(*parser).errorf("expected label matching operator but got %s", yylex.(*parser).token.Val)
}
}
goto yystack /* stack new state and value */

View file

@ -20,53 +20,53 @@ import (
"unicode/utf8"
)
// item represents a token or text string returned from the scanner.
type item struct {
typ ItemType // The type of this item.
pos Pos // The starting position, in bytes, of this item in the input string.
val string // The value of this item.
// Item represents a token or text string returned from the scanner.
type Item struct {
Typ ItemType // The type of this Item.
Pos Pos // The starting position, in bytes, of this Item in the input string.
Val string // The value of this Item.
}
// String returns a descriptive string for the item.
func (i item) String() string {
// String returns a descriptive string for the Item.
func (i Item) String() string {
switch {
case i.typ == EOF:
case i.Typ == EOF:
return "EOF"
case i.typ == ERROR:
return i.val
case i.typ == IDENTIFIER || i.typ == METRIC_IDENTIFIER:
return fmt.Sprintf("%q", i.val)
case i.typ.isKeyword():
return fmt.Sprintf("<%s>", i.val)
case i.typ.isOperator():
return fmt.Sprintf("<op:%s>", i.val)
case i.typ.isAggregator():
return fmt.Sprintf("<aggr:%s>", i.val)
case len(i.val) > 10:
return fmt.Sprintf("%.10q...", i.val)
case i.Typ == ERROR:
return i.Val
case i.Typ == IDENTIFIER || i.Typ == METRIC_IDENTIFIER:
return fmt.Sprintf("%q", i.Val)
case i.Typ.isKeyword():
return fmt.Sprintf("<%s>", i.Val)
case i.Typ.isOperator():
return fmt.Sprintf("<op:%s>", i.Val)
case i.Typ.isAggregator():
return fmt.Sprintf("<aggr:%s>", i.Val)
case len(i.Val) > 10:
return fmt.Sprintf("%.10q...", i.Val)
}
return fmt.Sprintf("%q", i.val)
return fmt.Sprintf("%q", i.Val)
}
// isOperator returns true if the item corresponds to a arithmetic or set operator.
// isOperator returns true if the Item corresponds to an arithmetic or set operator.
// Returns false otherwise.
func (i ItemType) isOperator() bool { return i > operatorsStart && i < operatorsEnd }
// isAggregator returns true if the item belongs to the aggregator functions.
// isAggregator returns true if the Item belongs to the aggregator functions.
// Returns false otherwise
func (i ItemType) isAggregator() bool { return i > aggregatorsStart && i < aggregatorsEnd }
// isAggregator returns true if the item is an aggregator that takes a parameter.
// isAggregatorWithParam returns true if the Item is an aggregator that takes a parameter.
// Returns false otherwise
func (i ItemType) isAggregatorWithParam() bool {
return i == TOPK || i == BOTTOMK || i == COUNT_VALUES || i == QUANTILE
}
// isKeyword returns true if the item corresponds to a keyword.
// isKeyword returns true if the Item corresponds to a keyword.
// Returns false otherwise.
func (i ItemType) isKeyword() bool { return i > keywordsStart && i < keywordsEnd }
// isComparisonOperator returns true if the item corresponds to a comparison operator.
// isComparisonOperator returns true if the Item corresponds to a comparison operator.
// Returns false otherwise.
func (i ItemType) isComparisonOperator() bool {
switch i {
@ -77,7 +77,7 @@ func (i ItemType) isComparisonOperator() bool {
}
}
// isSetOperator returns whether the item corresponds to a set operator.
// isSetOperator returns whether the Item corresponds to a set operator.
func (i ItemType) isSetOperator() bool {
switch i {
case LAND, LOR, LUNLESS:
@ -153,9 +153,9 @@ var key = map[string]ItemType{
"bool": BOOL,
}
// These are the default string representations for common items. It does not
// imply that those are the only character sequences that can be lexed to such an item.
var itemTypeStr = map[ItemType]string{
// These are the default string representations for common Items. It does not
// imply that those are the only character sequences that can be lexed to such an Item.
var ItemTypeStr = map[ItemType]string{
LEFT_PAREN: "(",
RIGHT_PAREN: ")",
LEFT_BRACE: "{",
@ -187,9 +187,9 @@ var itemTypeStr = map[ItemType]string{
}
func init() {
// Add keywords to item type strings.
// Add keywords to Item type strings.
for s, ty := range key {
itemTypeStr[ty] = s
ItemTypeStr[ty] = s
}
// Special numbers.
key["inf"] = NUMBER
@ -197,20 +197,20 @@ func init() {
}
func (i ItemType) String() string {
if s, ok := itemTypeStr[i]; ok {
if s, ok := ItemTypeStr[i]; ok {
return s
}
return fmt.Sprintf("<item %d>", i)
return fmt.Sprintf("<Item %d>", i)
}
func (i item) desc() string {
if _, ok := itemTypeStr[i.typ]; ok {
func (i Item) desc() string {
if _, ok := ItemTypeStr[i.Typ]; ok {
return i.String()
}
if i.typ == EOF {
return i.typ.desc()
if i.Typ == EOF {
return i.Typ.desc()
}
return fmt.Sprintf("%s %s", i.typ.desc(), i)
return fmt.Sprintf("%s %s", i.Typ.desc(), i)
}
func (i ItemType) desc() string {
@ -238,20 +238,20 @@ func (i ItemType) desc() string {
const eof = -1
// stateFn represents the state of the scanner as a function that returns the next state.
type stateFn func(*lexer) stateFn
type stateFn func(*Lexer) stateFn
// Pos is the position in a string.
type Pos int
// lexer holds the state of the scanner.
type lexer struct {
// Lexer holds the state of the scanner.
type Lexer struct {
input string // The string being scanned.
state stateFn // The next lexing function to enter.
pos Pos // Current position in the input.
start Pos // Start position of this item.
start Pos // Start position of this Item.
width Pos // Width of last rune read from input.
lastPos Pos // Position of most recent item returned by nextItem.
items []item // Slice buffer of scanned items.
lastPos Pos // Position of most recent Item returned by NextItem.
Items []Item // Slice buffer of scanned Items.
parenDepth int // Nesting depth of ( ) exprs.
braceOpen bool // Whether a { is opened.
@ -265,7 +265,7 @@ type lexer struct {
}
// next returns the next rune in the input.
func (l *lexer) next() rune {
func (l *Lexer) next() rune {
if int(l.pos) >= len(l.input) {
l.width = 0
return eof
@ -277,30 +277,30 @@ func (l *lexer) next() rune {
}
// peek returns but does not consume the next rune in the input.
func (l *lexer) peek() rune {
func (l *Lexer) peek() rune {
r := l.next()
l.backup()
return r
}
// backup steps back one rune. Can only be called once per call of next.
func (l *lexer) backup() {
func (l *Lexer) backup() {
l.pos -= l.width
}
// emit passes an item back to the client.
func (l *lexer) emit(t ItemType) {
l.items = append(l.items, item{t, l.start, l.input[l.start:l.pos]})
// emit passes an Item back to the client.
func (l *Lexer) emit(t ItemType) {
l.Items = append(l.Items, Item{t, l.start, l.input[l.start:l.pos]})
l.start = l.pos
}
// ignore skips over the pending input before this point.
func (l *lexer) ignore() {
func (l *Lexer) ignore() {
l.start = l.pos
}
// accept consumes the next rune if it's from the valid set.
func (l *lexer) accept(valid string) bool {
func (l *Lexer) accept(valid string) bool {
if strings.ContainsRune(valid, l.next()) {
return true
}
@ -309,7 +309,7 @@ func (l *lexer) accept(valid string) bool {
}
// acceptRun consumes a run of runes from the valid set.
func (l *lexer) acceptRun(valid string) {
func (l *Lexer) acceptRun(valid string) {
for strings.ContainsRune(valid, l.next()) {
// consume
}
@ -317,15 +317,15 @@ func (l *lexer) acceptRun(valid string) {
}
// lineNumber reports which line we're on, based on the position of
// the previous item returned by nextItem. Doing it this way
// the previous Item returned by NextItem. Doing it this way
// means we don't have to worry about peek double counting.
func (l *lexer) lineNumber() int {
func (l *Lexer) lineNumber() int {
return 1 + strings.Count(l.input[:l.lastPos], "\n")
}
// linePosition reports at which character in the current line
// we are on.
func (l *lexer) linePosition() int {
func (l *Lexer) linePosition() int {
lb := strings.LastIndex(l.input[:l.lastPos], "\n")
if lb == -1 {
return 1 + int(l.lastPos)
@ -334,30 +334,30 @@ func (l *lexer) linePosition() int {
}
// errorf returns an error token and terminates the scan by passing
// back a nil pointer that will be the next state, terminating l.nextItem.
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
l.items = append(l.items, item{ERROR, l.start, fmt.Sprintf(format, args...)})
// back a nil pointer that will be the next state, terminating l.NextItem.
func (l *Lexer) errorf(format string, args ...interface{}) stateFn {
l.Items = append(l.Items, Item{ERROR, l.start, fmt.Sprintf(format, args...)})
return nil
}
// nextItem returns the next item from the input.
func (l *lexer) nextItem() item {
for len(l.items) == 0 {
// NextItem returns the next Item from the input.
func (l *Lexer) NextItem() Item {
for len(l.Items) == 0 {
if l.state != nil {
l.state = l.state(l)
} else {
l.emit(EOF)
}
}
item := l.items[0]
l.items = l.items[1:]
l.lastPos = item.pos
return item
Item := l.Items[0]
l.Items = l.Items[1:]
l.lastPos = Item.Pos
return Item
}
// lex creates a new scanner for the input string.
func lex(input string) *lexer {
l := &lexer{
func Lex(input string) *Lexer {
l := &Lexer{
input: input,
state: lexStatements,
}
@ -365,15 +365,15 @@ func lex(input string) *lexer {
}
// run runs the state machine for the lexer.
func (l *lexer) run() {
func (l *Lexer) run() {
for l.state = lexStatements; l.state != nil; {
l.state = l.state(l)
}
}
// Release resources used by lexer.
func (l *lexer) close() {
for range l.items {
func (l *Lexer) close() {
for range l.Items {
// Consume.
}
}
@ -382,7 +382,7 @@ func (l *lexer) close() {
const lineComment = "#"
// lexStatements is the top-level state for lexing.
func lexStatements(l *lexer) stateFn {
func lexStatements(l *Lexer) stateFn {
if l.braceOpen {
return lexInsideBraces
}
@ -504,7 +504,7 @@ func lexStatements(l *lexer) stateFn {
// lexInsideBraces scans the inside of a vector selector. Keywords are ignored and
// scanned as identifiers.
func lexInsideBraces(l *lexer) stateFn {
func lexInsideBraces(l *Lexer) stateFn {
if strings.HasPrefix(l.input[l.pos:], lineComment) {
return lexLineComment
}
@ -558,7 +558,7 @@ func lexInsideBraces(l *lexer) stateFn {
}
// lexValueSequence scans a value sequence of a series description.
func lexValueSequence(l *lexer) stateFn {
func lexValueSequence(l *Lexer) stateFn {
switch r := l.next(); {
case r == eof:
return lexStatements
@ -578,7 +578,7 @@ func lexValueSequence(l *lexer) stateFn {
lexNumber(l)
case isAlpha(r):
l.backup()
// We might lex invalid items here but this will be caught by the parser.
// We might lex invalid Items here but this will be caught by the parser.
return lexKeywordOrIdentifier
default:
return l.errorf("unexpected character in series sequence: %q", r)
@ -594,7 +594,7 @@ func lexValueSequence(l *lexer) stateFn {
// package of the Go standard library to work for Prometheus-style strings.
// None of the actual escaping/quoting logic was changed in this function - it
// was only modified to integrate with our lexer.
func lexEscape(l *lexer) {
func lexEscape(l *Lexer) {
var n int
var base, max uint32
@ -653,7 +653,7 @@ func digitVal(ch rune) int {
}
// skipSpaces skips the spaces until a non-space is encountered.
func skipSpaces(l *lexer) {
func skipSpaces(l *Lexer) {
for isSpace(l.peek()) {
l.next()
}
@ -661,7 +661,7 @@ func skipSpaces(l *lexer) {
}
// lexString scans a quoted string. The initial quote has already been seen.
func lexString(l *lexer) stateFn {
func lexString(l *Lexer) stateFn {
Loop:
for {
switch l.next() {
@ -680,7 +680,7 @@ Loop:
}
// lexRawString scans a raw quoted string. The initial quote has already been seen.
func lexRawString(l *lexer) stateFn {
func lexRawString(l *Lexer) stateFn {
Loop:
for {
switch l.next() {
@ -697,7 +697,7 @@ Loop:
}
// lexSpace scans a run of space characters. One space has already been seen.
func lexSpace(l *lexer) stateFn {
func lexSpace(l *Lexer) stateFn {
for isSpace(l.peek()) {
l.next()
}
@ -706,7 +706,7 @@ func lexSpace(l *lexer) stateFn {
}
// lexLineComment scans a line comment. Left comment marker is known to be present.
func lexLineComment(l *lexer) stateFn {
func lexLineComment(l *Lexer) stateFn {
l.pos += Pos(len(lineComment))
for r := l.next(); !isEndOfLine(r) && r != eof; {
r = l.next()
@ -716,7 +716,7 @@ func lexLineComment(l *lexer) stateFn {
return lexStatements
}
func lexDuration(l *lexer) stateFn {
func lexDuration(l *Lexer) stateFn {
if l.scanNumber() {
return l.errorf("missing unit character in duration")
}
@ -733,7 +733,7 @@ func lexDuration(l *lexer) stateFn {
}
// lexNumber scans a number: decimal, hex, oct or float.
func lexNumber(l *lexer) stateFn {
func lexNumber(l *Lexer) stateFn {
if !l.scanNumber() {
return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
}
@ -741,8 +741,8 @@ func lexNumber(l *lexer) stateFn {
return lexStatements
}
// lexNumberOrDuration scans a number or a duration item.
func lexNumberOrDuration(l *lexer) stateFn {
// lexNumberOrDuration scans a number or a duration Item.
func lexNumberOrDuration(l *Lexer) stateFn {
if l.scanNumber() {
l.emit(NUMBER)
return lexStatements
@ -759,9 +759,9 @@ func lexNumberOrDuration(l *lexer) stateFn {
return l.errorf("bad number or duration syntax: %q", l.input[l.start:l.pos])
}
// scanNumber scans numbers of different formats. The scanned item is
// scanNumber scans numbers of different formats. The scanned Item is
// not necessarily a valid number. This case is caught by the parser.
func (l *lexer) scanNumber() bool {
func (l *Lexer) scanNumber() bool {
digits := "0123456789"
// Disallow hexadecimal in series descriptions as the syntax is ambiguous.
if !l.seriesDesc && l.accept("0") && l.accept("xX") {
@ -785,7 +785,7 @@ func (l *lexer) scanNumber() bool {
// lexIdentifier scans an alphanumeric identifier. The next character
// is known to be a letter.
func lexIdentifier(l *lexer) stateFn {
func lexIdentifier(l *Lexer) stateFn {
for isAlphaNumeric(l.next()) {
// absorb
}
@ -795,9 +795,9 @@ func lexIdentifier(l *lexer) stateFn {
}
// lexKeywordOrIdentifier scans an alphanumeric identifier which may contain
// a colon rune. If the identifier is a keyword the respective keyword item
// a colon rune. If the identifier is a keyword the respective keyword Item
// is scanned.
func lexKeywordOrIdentifier(l *lexer) stateFn {
func lexKeywordOrIdentifier(l *Lexer) stateFn {
Loop:
for {
switch r := l.next(); {

View file

@ -21,7 +21,7 @@ import (
type testCase struct {
input string
expected []item
expected []Item
fail bool
seriesDesc bool // Whether to lex a series description.
}
@ -35,44 +35,44 @@ var tests = []struct {
tests: []testCase{
{
input: ",",
expected: []item{{COMMA, 0, ","}},
expected: []Item{{COMMA, 0, ","}},
}, {
input: "()",
expected: []item{{LEFT_PAREN, 0, `(`}, {RIGHT_PAREN, 1, `)`}},
expected: []Item{{LEFT_PAREN, 0, `(`}, {RIGHT_PAREN, 1, `)`}},
}, {
input: "{}",
expected: []item{{LEFT_BRACE, 0, `{`}, {RIGHT_BRACE, 1, `}`}},
expected: []Item{{LEFT_BRACE, 0, `{`}, {RIGHT_BRACE, 1, `}`}},
}, {
input: "[5m]",
expected: []item{
expected: []Item{
{LEFT_BRACKET, 0, `[`},
{DURATION, 1, `5m`},
{RIGHT_BRACKET, 3, `]`},
},
}, {
input: "[ 5m]",
expected: []item{
expected: []Item{
{LEFT_BRACKET, 0, `[`},
{DURATION, 2, `5m`},
{RIGHT_BRACKET, 4, `]`},
},
}, {
input: "[ 5m]",
expected: []item{
expected: []Item{
{LEFT_BRACKET, 0, `[`},
{DURATION, 3, `5m`},
{RIGHT_BRACKET, 5, `]`},
},
}, {
input: "[ 5m ]",
expected: []item{
expected: []Item{
{LEFT_BRACKET, 0, `[`},
{DURATION, 3, `5m`},
{RIGHT_BRACKET, 6, `]`},
},
}, {
input: "\r\n\r",
expected: []item{},
expected: []Item{},
},
},
},
@ -81,55 +81,55 @@ var tests = []struct {
tests: []testCase{
{
input: "1",
expected: []item{{NUMBER, 0, "1"}},
expected: []Item{{NUMBER, 0, "1"}},
}, {
input: "4.23",
expected: []item{{NUMBER, 0, "4.23"}},
expected: []Item{{NUMBER, 0, "4.23"}},
}, {
input: ".3",
expected: []item{{NUMBER, 0, ".3"}},
expected: []Item{{NUMBER, 0, ".3"}},
}, {
input: "5.",
expected: []item{{NUMBER, 0, "5."}},
expected: []Item{{NUMBER, 0, "5."}},
}, {
input: "NaN",
expected: []item{{NUMBER, 0, "NaN"}},
expected: []Item{{NUMBER, 0, "NaN"}},
}, {
input: "nAN",
expected: []item{{NUMBER, 0, "nAN"}},
expected: []Item{{NUMBER, 0, "nAN"}},
}, {
input: "NaN 123",
expected: []item{{NUMBER, 0, "NaN"}, {NUMBER, 4, "123"}},
expected: []Item{{NUMBER, 0, "NaN"}, {NUMBER, 4, "123"}},
}, {
input: "NaN123",
expected: []item{{IDENTIFIER, 0, "NaN123"}},
expected: []Item{{IDENTIFIER, 0, "NaN123"}},
}, {
input: "iNf",
expected: []item{{NUMBER, 0, "iNf"}},
expected: []Item{{NUMBER, 0, "iNf"}},
}, {
input: "Inf",
expected: []item{{NUMBER, 0, "Inf"}},
expected: []Item{{NUMBER, 0, "Inf"}},
}, {
input: "+Inf",
expected: []item{{ADD, 0, "+"}, {NUMBER, 1, "Inf"}},
expected: []Item{{ADD, 0, "+"}, {NUMBER, 1, "Inf"}},
}, {
input: "+Inf 123",
expected: []item{{ADD, 0, "+"}, {NUMBER, 1, "Inf"}, {NUMBER, 5, "123"}},
expected: []Item{{ADD, 0, "+"}, {NUMBER, 1, "Inf"}, {NUMBER, 5, "123"}},
}, {
input: "-Inf",
expected: []item{{SUB, 0, "-"}, {NUMBER, 1, "Inf"}},
expected: []Item{{SUB, 0, "-"}, {NUMBER, 1, "Inf"}},
}, {
input: "Infoo",
expected: []item{{IDENTIFIER, 0, "Infoo"}},
expected: []Item{{IDENTIFIER, 0, "Infoo"}},
}, {
input: "-Infoo",
expected: []item{{SUB, 0, "-"}, {IDENTIFIER, 1, "Infoo"}},
expected: []Item{{SUB, 0, "-"}, {IDENTIFIER, 1, "Infoo"}},
}, {
input: "-Inf 123",
expected: []item{{SUB, 0, "-"}, {NUMBER, 1, "Inf"}, {NUMBER, 5, "123"}},
expected: []Item{{SUB, 0, "-"}, {NUMBER, 1, "Inf"}, {NUMBER, 5, "123"}},
}, {
input: "0x123",
expected: []item{{NUMBER, 0, "0x123"}},
expected: []Item{{NUMBER, 0, "0x123"}},
},
},
},
@ -138,22 +138,22 @@ var tests = []struct {
tests: []testCase{
{
input: "\"test\\tsequence\"",
expected: []item{{STRING, 0, `"test\tsequence"`}},
expected: []Item{{STRING, 0, `"test\tsequence"`}},
},
{
input: "\"test\\\\.expression\"",
expected: []item{{STRING, 0, `"test\\.expression"`}},
expected: []Item{{STRING, 0, `"test\\.expression"`}},
},
{
input: "\"test\\.expression\"",
expected: []item{
expected: []Item{
{ERROR, 0, "unknown escape sequence U+002E '.'"},
{STRING, 0, `"test\.expression"`},
},
},
{
input: "`test\\.expression`",
expected: []item{{STRING, 0, "`test\\.expression`"}},
expected: []Item{{STRING, 0, "`test\\.expression`"}},
},
{
// See https://github.com/prometheus/prometheus/issues/939.
@ -167,19 +167,19 @@ var tests = []struct {
tests: []testCase{
{
input: "5s",
expected: []item{{DURATION, 0, "5s"}},
expected: []Item{{DURATION, 0, "5s"}},
}, {
input: "123m",
expected: []item{{DURATION, 0, "123m"}},
expected: []Item{{DURATION, 0, "123m"}},
}, {
input: "1h",
expected: []item{{DURATION, 0, "1h"}},
expected: []Item{{DURATION, 0, "1h"}},
}, {
input: "3w",
expected: []item{{DURATION, 0, "3w"}},
expected: []Item{{DURATION, 0, "3w"}},
}, {
input: "1y",
expected: []item{{DURATION, 0, "1y"}},
expected: []Item{{DURATION, 0, "1y"}},
},
},
},
@ -188,16 +188,16 @@ var tests = []struct {
tests: []testCase{
{
input: "abc",
expected: []item{{IDENTIFIER, 0, "abc"}},
expected: []Item{{IDENTIFIER, 0, "abc"}},
}, {
input: "a:bc",
expected: []item{{METRIC_IDENTIFIER, 0, "a:bc"}},
expected: []Item{{METRIC_IDENTIFIER, 0, "a:bc"}},
}, {
input: "abc d",
expected: []item{{IDENTIFIER, 0, "abc"}, {IDENTIFIER, 4, "d"}},
expected: []Item{{IDENTIFIER, 0, "abc"}, {IDENTIFIER, 4, "d"}},
}, {
input: ":bc",
expected: []item{{METRIC_IDENTIFIER, 0, ":bc"}},
expected: []Item{{METRIC_IDENTIFIER, 0, ":bc"}},
}, {
input: "0a:bc",
fail: true,
@ -209,10 +209,10 @@ var tests = []struct {
tests: []testCase{
{
input: "# some comment",
expected: []item{{COMMENT, 0, "# some comment"}},
expected: []Item{{COMMENT, 0, "# some comment"}},
}, {
input: "5 # 1+1\n5",
expected: []item{
expected: []Item{
{NUMBER, 0, "5"},
{COMMENT, 2, "# 1+1"},
{NUMBER, 8, "5"},
@ -225,56 +225,56 @@ var tests = []struct {
tests: []testCase{
{
input: `=`,
expected: []item{{ASSIGN, 0, `=`}},
expected: []Item{{ASSIGN, 0, `=`}},
}, {
// Inside braces equality is a single '=' character.
input: `{=}`,
expected: []item{{LEFT_BRACE, 0, `{`}, {EQL, 1, `=`}, {RIGHT_BRACE, 2, `}`}},
expected: []Item{{LEFT_BRACE, 0, `{`}, {EQL, 1, `=`}, {RIGHT_BRACE, 2, `}`}},
}, {
input: `==`,
expected: []item{{EQL, 0, `==`}},
expected: []Item{{EQL, 0, `==`}},
}, {
input: `!=`,
expected: []item{{NEQ, 0, `!=`}},
expected: []Item{{NEQ, 0, `!=`}},
}, {
input: `<`,
expected: []item{{LSS, 0, `<`}},
expected: []Item{{LSS, 0, `<`}},
}, {
input: `>`,
expected: []item{{GTR, 0, `>`}},
expected: []Item{{GTR, 0, `>`}},
}, {
input: `>=`,
expected: []item{{GTE, 0, `>=`}},
expected: []Item{{GTE, 0, `>=`}},
}, {
input: `<=`,
expected: []item{{LTE, 0, `<=`}},
expected: []Item{{LTE, 0, `<=`}},
}, {
input: `+`,
expected: []item{{ADD, 0, `+`}},
expected: []Item{{ADD, 0, `+`}},
}, {
input: `-`,
expected: []item{{SUB, 0, `-`}},
expected: []Item{{SUB, 0, `-`}},
}, {
input: `*`,
expected: []item{{MUL, 0, `*`}},
expected: []Item{{MUL, 0, `*`}},
}, {
input: `/`,
expected: []item{{DIV, 0, `/`}},
expected: []Item{{DIV, 0, `/`}},
}, {
input: `^`,
expected: []item{{POW, 0, `^`}},
expected: []Item{{POW, 0, `^`}},
}, {
input: `%`,
expected: []item{{MOD, 0, `%`}},
expected: []Item{{MOD, 0, `%`}},
}, {
input: `AND`,
expected: []item{{LAND, 0, `AND`}},
expected: []Item{{LAND, 0, `AND`}},
}, {
input: `or`,
expected: []item{{LOR, 0, `or`}},
expected: []Item{{LOR, 0, `or`}},
}, {
input: `unless`,
expected: []item{{LUNLESS, 0, `unless`}},
expected: []Item{{LUNLESS, 0, `unless`}},
},
},
},
@ -283,25 +283,25 @@ var tests = []struct {
tests: []testCase{
{
input: `sum`,
expected: []item{{SUM, 0, `sum`}},
expected: []Item{{SUM, 0, `sum`}},
}, {
input: `AVG`,
expected: []item{{AVG, 0, `AVG`}},
expected: []Item{{AVG, 0, `AVG`}},
}, {
input: `MAX`,
expected: []item{{MAX, 0, `MAX`}},
expected: []Item{{MAX, 0, `MAX`}},
}, {
input: `min`,
expected: []item{{MIN, 0, `min`}},
expected: []Item{{MIN, 0, `min`}},
}, {
input: `count`,
expected: []item{{COUNT, 0, `count`}},
expected: []Item{{COUNT, 0, `count`}},
}, {
input: `stdvar`,
expected: []item{{STDVAR, 0, `stdvar`}},
expected: []Item{{STDVAR, 0, `stdvar`}},
}, {
input: `stddev`,
expected: []item{{STDDEV, 0, `stddev`}},
expected: []Item{{STDDEV, 0, `stddev`}},
},
},
},
@ -310,28 +310,28 @@ var tests = []struct {
tests: []testCase{
{
input: "offset",
expected: []item{{OFFSET, 0, "offset"}},
expected: []Item{{OFFSET, 0, "offset"}},
}, {
input: "by",
expected: []item{{BY, 0, "by"}},
expected: []Item{{BY, 0, "by"}},
}, {
input: "without",
expected: []item{{WITHOUT, 0, "without"}},
expected: []Item{{WITHOUT, 0, "without"}},
}, {
input: "on",
expected: []item{{ON, 0, "on"}},
expected: []Item{{ON, 0, "on"}},
}, {
input: "ignoring",
expected: []item{{IGNORING, 0, "ignoring"}},
expected: []Item{{IGNORING, 0, "ignoring"}},
}, {
input: "group_left",
expected: []item{{GROUP_LEFT, 0, "group_left"}},
expected: []Item{{GROUP_LEFT, 0, "group_left"}},
}, {
input: "group_right",
expected: []item{{GROUP_RIGHT, 0, "group_right"}},
expected: []Item{{GROUP_RIGHT, 0, "group_right"}},
}, {
input: "bool",
expected: []item{{BOOL, 0, "bool"}},
expected: []Item{{BOOL, 0, "bool"}},
},
},
},
@ -349,7 +349,7 @@ var tests = []struct {
fail: true,
}, {
input: `{foo='bar'}`,
expected: []item{
expected: []Item{
{LEFT_BRACE, 0, `{`},
{IDENTIFIER, 1, `foo`},
{EQL, 4, `=`},
@ -358,7 +358,7 @@ var tests = []struct {
},
}, {
input: `{foo="bar"}`,
expected: []item{
expected: []Item{
{LEFT_BRACE, 0, `{`},
{IDENTIFIER, 1, `foo`},
{EQL, 4, `=`},
@ -367,7 +367,7 @@ var tests = []struct {
},
}, {
input: `{foo="bar\"bar"}`,
expected: []item{
expected: []Item{
{LEFT_BRACE, 0, `{`},
{IDENTIFIER, 1, `foo`},
{EQL, 4, `=`},
@ -376,7 +376,7 @@ var tests = []struct {
},
}, {
input: `{NaN != "bar" }`,
expected: []item{
expected: []Item{
{LEFT_BRACE, 0, `{`},
{IDENTIFIER, 1, `NaN`},
{NEQ, 5, `!=`},
@ -385,7 +385,7 @@ var tests = []struct {
},
}, {
input: `{alert=~"bar" }`,
expected: []item{
expected: []Item{
{LEFT_BRACE, 0, `{`},
{IDENTIFIER, 1, `alert`},
{EQL_REGEX, 6, `=~`},
@ -394,7 +394,7 @@ var tests = []struct {
},
}, {
input: `{on!~"bar"}`,
expected: []item{
expected: []Item{
{LEFT_BRACE, 0, `{`},
{IDENTIFIER, 1, `on`},
{NEQ_REGEX, 3, `!~`},
@ -468,7 +468,7 @@ var tests = []struct {
tests: []testCase{
{
input: `{} _ 1 x .3`,
expected: []item{
expected: []Item{
{LEFT_BRACE, 0, `{`},
{RIGHT_BRACE, 1, `}`},
{SPACE, 2, ` `},
@ -484,7 +484,7 @@ var tests = []struct {
},
{
input: `metric +Inf Inf NaN`,
expected: []item{
expected: []Item{
{IDENTIFIER, 0, `metric`},
{SPACE, 6, ` `},
{ADD, 7, `+`},
@ -498,7 +498,7 @@ var tests = []struct {
},
{
input: `metric 1+1x4`,
expected: []item{
expected: []Item{
{IDENTIFIER, 0, `metric`},
{SPACE, 6, ` `},
{NUMBER, 7, `1`},
@ -516,7 +516,7 @@ var tests = []struct {
tests: []testCase{
{
input: `test_name{on!~"bar"}[4m:4s]`,
expected: []item{
expected: []Item{
{IDENTIFIER, 0, `test_name`},
{LEFT_BRACE, 9, `{`},
{IDENTIFIER, 10, `on`},
@ -532,7 +532,7 @@ var tests = []struct {
},
{
input: `test:name{on!~"bar"}[4m:4s]`,
expected: []item{
expected: []Item{
{METRIC_IDENTIFIER, 0, `test:name`},
{LEFT_BRACE, 9, `{`},
{IDENTIFIER, 10, `on`},
@ -547,7 +547,7 @@ var tests = []struct {
},
}, {
input: `test:name{on!~"b:ar"}[4m:4s]`,
expected: []item{
expected: []Item{
{METRIC_IDENTIFIER, 0, `test:name`},
{LEFT_BRACE, 9, `{`},
{IDENTIFIER, 10, `on`},
@ -562,7 +562,7 @@ var tests = []struct {
},
}, {
input: `test:name{on!~"b:ar"}[4m:]`,
expected: []item{
expected: []Item{
{METRIC_IDENTIFIER, 0, `test:name`},
{LEFT_BRACE, 9, `{`},
{IDENTIFIER, 10, `on`},
@ -576,7 +576,7 @@ var tests = []struct {
},
}, { // Nested Subquery.
input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:])[4m:3s]`,
expected: []item{
expected: []Item{
{IDENTIFIER, 0, `min_over_time`},
{LEFT_PAREN, 13, `(`},
@ -607,7 +607,7 @@ var tests = []struct {
// Subquery with offset.
{
input: `test:name{on!~"b:ar"}[4m:4s] offset 10m`,
expected: []item{
expected: []Item{
{METRIC_IDENTIFIER, 0, `test:name`},
{LEFT_BRACE, 9, `{`},
{IDENTIFIER, 10, `on`},
@ -624,7 +624,7 @@ var tests = []struct {
},
}, {
input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:] offset 6m)[4m:3s]`,
expected: []item{
expected: []Item{
{IDENTIFIER, 0, `min_over_time`},
{LEFT_PAREN, 13, `(`},
@ -656,7 +656,7 @@ var tests = []struct {
},
{
input: `test:name[ 5m]`,
expected: []item{
expected: []Item{
{METRIC_IDENTIFIER, 0, `test:name`},
{LEFT_BRACKET, 9, `[`},
{DURATION, 11, `5m`},
@ -693,28 +693,28 @@ func TestLexer(t *testing.T) {
for _, typ := range tests {
t.Run(typ.name, func(t *testing.T) {
for i, test := range typ.tests {
l := &lexer{
l := &Lexer{
input: test.input,
seriesDesc: test.seriesDesc,
}
l.run()
out := l.items
out := l.Items
lastItem := out[len(out)-1]
if test.fail {
if lastItem.typ != ERROR {
if lastItem.Typ != ERROR {
t.Logf("%d: input %q", i, test.input)
t.Fatalf("expected lexing error but did not fail")
}
continue
}
if lastItem.typ == ERROR {
if lastItem.Typ == ERROR {
t.Logf("%d: input %q", i, test.input)
t.Fatalf("unexpected lexing error at position %d: %s", lastItem.pos, lastItem)
t.Fatalf("unexpected lexing error at position %d: %s", lastItem.Pos, lastItem)
}
eofItem := item{EOF, Pos(len(test.input)), ""}
eofItem := Item{EOF, Pos(len(test.input)), ""}
testutil.Equals(t, lastItem, eofItem, "%d: input %q", i, test.input)
out = out[:len(out)-1]

View file

@ -32,11 +32,11 @@ import (
)
type parser struct {
lex *lexer
token item
lex *Lexer
token Item
peeking bool
inject item
inject Item
injecting bool
switchSymbols []ItemType
@ -74,7 +74,7 @@ func ParseMetric(input string) (m labels.Labels, err error) {
defer p.recover(&err)
m = p.metric()
if p.peek().typ != EOF {
if p.peek().Typ != EOF {
p.errorf("could not parse remaining input %.15q...", p.lex.input[p.lex.lastPos:])
}
return m, nil
@ -87,11 +87,11 @@ func ParseMetricSelector(input string) (m []*labels.Matcher, err error) {
defer p.recover(&err)
name := ""
if t := p.peek().typ; t == METRIC_IDENTIFIER || t == IDENTIFIER {
name = p.next().val
if t := p.peek().Typ; t == METRIC_IDENTIFIER || t == IDENTIFIER {
name = p.next().Val
}
vs := p.VectorSelector(name)
if p.peek().typ != EOF {
if p.peek().Typ != EOF {
p.errorf("could not parse remaining input %.15q...", p.lex.input[p.lex.lastPos:])
}
return vs.LabelMatchers, nil
@ -100,7 +100,7 @@ func ParseMetricSelector(input string) (m []*labels.Matcher, err error) {
// newParser returns a new parser.
func newParser(input string) *parser {
p := &parser{
lex: lex(input),
lex: Lex(input),
}
return p
}
@ -109,7 +109,7 @@ func newParser(input string) *parser {
func (p *parser) parseExpr() (expr Expr, err error) {
defer p.recover(&err)
for p.peek().typ != EOF {
for p.peek().Typ != EOF {
if expr != nil {
p.errorf("could not parse remaining input %.15q...", p.lex.input[p.lex.lastPos:])
}
@ -151,20 +151,20 @@ func (p *parser) parseSeriesDesc() (m labels.Labels, vals []sequenceValue, err e
const ctx = "series values"
for {
for p.peek().typ == SPACE {
for p.peek().Typ == SPACE {
p.next()
}
if p.peek().typ == EOF {
if p.peek().Typ == EOF {
break
}
// Extract blanks.
if p.peek().typ == BLANK {
if p.peek().Typ == BLANK {
p.next()
times := uint64(1)
if p.peek().typ == TIMES {
if p.peek().Typ == TIMES {
p.next()
times, err = strconv.ParseUint(p.expect(NUMBER, ctx).val, 10, 64)
times, err = strconv.ParseUint(p.expect(NUMBER, ctx).Val, 10, 64)
if err != nil {
p.errorf("invalid repetition in %s: %s", ctx, err)
}
@ -174,7 +174,7 @@ func (p *parser) parseSeriesDesc() (m labels.Labels, vals []sequenceValue, err e
}
// This is to ensure that there is a space between this and the next number.
// This is especially required if the next number is negative.
if t := p.expectOneOf(SPACE, EOF, ctx).typ; t == EOF {
if t := p.expectOneOf(SPACE, EOF, ctx).Typ; t == EOF {
break
}
continue
@ -182,15 +182,15 @@ func (p *parser) parseSeriesDesc() (m labels.Labels, vals []sequenceValue, err e
// Extract values.
sign := 1.0
if t := p.peek().typ; t == SUB || t == ADD {
if p.next().typ == SUB {
if t := p.peek().Typ; t == SUB || t == ADD {
if p.next().Typ == SUB {
sign = -1
}
}
var k float64
if t := p.peek().typ; t == NUMBER {
k = sign * p.number(p.expect(NUMBER, ctx).val)
} else if t == IDENTIFIER && p.peek().val == "stale" {
if t := p.peek().Typ; t == NUMBER {
k = sign * p.number(p.expect(NUMBER, ctx).Val)
} else if t == IDENTIFIER && p.peek().Val == "stale" {
p.next()
k = math.Float64frombits(value.StaleNaN)
} else {
@ -201,24 +201,24 @@ func (p *parser) parseSeriesDesc() (m labels.Labels, vals []sequenceValue, err e
})
// If there are no offset repetitions specified, proceed with the next value.
if t := p.peek(); t.typ == SPACE {
if t := p.peek(); t.Typ == SPACE {
// This ensures there is a space between every value.
continue
} else if t.typ == EOF {
} else if t.Typ == EOF {
break
} else if t.typ != ADD && t.typ != SUB {
} else if t.Typ != ADD && t.Typ != SUB {
p.errorf("expected next value or relative expansion in %s but got %s (value: %s)", ctx, t.desc(), p.peek())
}
// Expand the repeated offsets into values.
sign = 1.0
if p.next().typ == SUB {
if p.next().Typ == SUB {
sign = -1.0
}
offset := sign * p.number(p.expect(NUMBER, ctx).val)
offset := sign * p.number(p.expect(NUMBER, ctx).Val)
p.expect(TIMES, ctx)
times, err := strconv.ParseUint(p.expect(NUMBER, ctx).val, 10, 64)
times, err := strconv.ParseUint(p.expect(NUMBER, ctx).Val, 10, 64)
if err != nil {
p.errorf("invalid repetition in %s: %s", ctx, err)
}
@ -232,7 +232,7 @@ func (p *parser) parseSeriesDesc() (m labels.Labels, vals []sequenceValue, err e
// This is to ensure that there is a space between this expanding notation
// and the next number. This is especially required if the next number
// is negative.
if t := p.expectOneOf(SPACE, EOF, ctx).typ; t == EOF {
if t := p.expectOneOf(SPACE, EOF, ctx).Typ; t == EOF {
break
}
}
@ -248,35 +248,35 @@ func (p *parser) typecheck(node Node) (err error) {
}
// next returns the next token.
func (p *parser) next() item {
func (p *parser) next() Item {
if !p.peeking {
t := p.lex.nextItem()
t := p.lex.NextItem()
// Skip comments.
for t.typ == COMMENT {
t = p.lex.nextItem()
for t.Typ == COMMENT {
t = p.lex.NextItem()
}
p.token = t
}
p.peeking = false
if p.token.typ == ERROR {
p.errorf("%s", p.token.val)
if p.token.Typ == ERROR {
p.errorf("%s", p.token.Val)
}
return p.token
}
// peek returns but does not consume the next token.
func (p *parser) peek() item {
func (p *parser) peek() Item {
if p.peeking {
return p.token
}
p.peeking = true
t := p.lex.nextItem()
t := p.lex.NextItem()
// Skip comments.
for t.typ == COMMENT {
t = p.lex.nextItem()
for t.Typ == COMMENT {
t = p.lex.NextItem()
}
p.token = t
return p.token
@ -306,18 +306,18 @@ func (p *parser) error(err error) {
}
// expect consumes the next token and guarantees it has the required type.
func (p *parser) expect(exp ItemType, context string) item {
func (p *parser) expect(exp ItemType, context string) Item {
token := p.next()
if token.typ != exp {
if token.Typ != exp {
p.errorf("unexpected %s in %s, expected %s", token.desc(), context, exp.desc())
}
return token
}
// expectOneOf consumes the next token and guarantees it has one of the required types.
func (p *parser) expectOneOf(exp1, exp2 ItemType, context string) item {
func (p *parser) expectOneOf(exp1, exp2 ItemType, context string) Item {
token := p.next()
if token.typ != exp1 && token.typ != exp2 {
if token.Typ != exp1 && token.Typ != exp2 {
p.errorf("unexpected %s in %s, expected %s or %s", token.desc(), context, exp1.desc(), exp2.desc())
}
return token
@ -342,7 +342,7 @@ func (p *parser) recover(errp *error) {
}
// Lex is expected by the yyLexer interface of the yacc generated parser.
// It writes the next item provided by the lexer to the provided pointer address.
// It writes the next Item provided by the lexer to the provided pointer address.
// Comments are skipped.
//
// The yyLexer interface is currently implemented by the parser to allow
@ -358,7 +358,7 @@ func (p *parser) Lex(lval *yySymType) int {
lval.item = p.next()
}
typ := lval.item.typ
typ := lval.item.Typ
for _, t := range p.switchSymbols {
if t == typ {
@ -377,23 +377,23 @@ func (p *parser) Lex(lval *yySymType) int {
func (p *parser) Error(e string) {
}
// InjectItem allows injecting a single item at the beginning of the token stream
// InjectItem allows injecting a single Item at the beginning of the token stream
// consumed by the generated parser.
// This allows having multiple start symbols as described in
// https://www.gnu.org/software/bison/manual/html_node/Multiple-start_002dsymbols.html .
// Only the Lex function used by the generated parser is affected by this injected item.
// Trying to inject when a previously injected item has not yet been consumed will panic.
// Only item types that are supposed to be used as start symbols are allowed as an argument.
// Only the Lex function used by the generated parser is affected by this injected Item.
// Trying to inject when a previously injected Item has not yet been consumed will panic.
// Only Item types that are supposed to be used as start symbols are allowed as an argument.
func (p *parser) InjectItem(typ ItemType) {
if p.injecting {
panic("cannot inject multiple items into the token stream")
panic("cannot inject multiple Items into the token stream")
}
if typ != 0 && (typ <= startSymbolsStart || typ >= startSymbolsEnd) {
panic("cannot inject symbol that isn't start symbol")
}
p.inject = item{typ: typ}
p.inject = Item{Typ: typ}
p.injecting = true
}
@ -406,14 +406,14 @@ func (p *parser) expr() Expr {
// on the operators' precedence.
for {
// If the next token is not an operator the expression is done.
op := p.peek().typ
op := p.peek().Typ
if !op.isOperator() {
// Check for subquery.
if op == LEFT_BRACKET {
expr = p.subqueryOrRangeSelector(expr, false)
if s, ok := expr.(*SubqueryExpr); ok {
// Parse optional offset.
if p.peek().typ == OFFSET {
if p.peek().Typ == OFFSET {
offset := p.offset()
s.Offset = offset
}
@ -434,7 +434,7 @@ func (p *parser) expr() Expr {
returnBool := false
// Parse bool modifier.
if p.peek().typ == BOOL {
if p.peek().Typ == BOOL {
if !op.isComparisonOperator() {
p.errorf("bool modifier can only be used on comparison operators")
}
@ -443,22 +443,22 @@ func (p *parser) expr() Expr {
}
// Parse ON/IGNORING clause.
if p.peek().typ == ON || p.peek().typ == IGNORING {
if p.peek().typ == ON {
if p.peek().Typ == ON || p.peek().Typ == IGNORING {
if p.peek().Typ == ON {
vecMatching.On = true
}
p.next()
vecMatching.MatchingLabels = p.labels()
// Parse grouping.
if t := p.peek().typ; t == GROUP_LEFT || t == GROUP_RIGHT {
if t := p.peek().Typ; t == GROUP_LEFT || t == GROUP_RIGHT {
p.next()
if t == GROUP_LEFT {
vecMatching.Card = CardManyToOne
} else {
vecMatching.Card = CardOneToMany
}
if p.peek().typ == LEFT_PAREN {
if p.peek().Typ == LEFT_PAREN {
vecMatching.Include = p.labels()
}
}
@ -514,19 +514,19 @@ func (p *parser) balance(lhs Expr, op ItemType, rhs Expr, vecMatching *VectorMat
// <Vector_selector> | <Matrix_selector> | (+|-) <number_literal> | '(' <expr> ')'
//
func (p *parser) unaryExpr() Expr {
switch t := p.peek(); t.typ {
switch t := p.peek(); t.Typ {
case ADD, SUB:
p.next()
e := p.unaryExpr()
// Simplify unary expressions for number literals.
if nl, ok := e.(*NumberLiteral); ok {
if t.typ == SUB {
if t.Typ == SUB {
nl.Val *= -1
}
return nl
}
return &UnaryExpr{Op: t.typ, Expr: e}
return &UnaryExpr{Op: t.Typ, Expr: e}
case LEFT_PAREN:
p.next()
@ -538,12 +538,12 @@ func (p *parser) unaryExpr() Expr {
e := p.primaryExpr()
// Expression might be followed by a range selector.
if p.peek().typ == LEFT_BRACKET {
if p.peek().Typ == LEFT_BRACKET {
e = p.subqueryOrRangeSelector(e, true)
}
// Parse optional offset.
if p.peek().typ == OFFSET {
if p.peek().Typ == OFFSET {
offset := p.offset()
switch s := e.(type) {
@ -577,16 +577,16 @@ func (p *parser) subqueryOrRangeSelector(expr Expr, checkRange bool) Expr {
var erange time.Duration
var err error
erangeStr := p.expect(DURATION, ctx).val
erangeStr := p.expect(DURATION, ctx).Val
erange, err = parseDuration(erangeStr)
if err != nil {
p.error(err)
}
var itm item
var itm Item
if checkRange {
itm = p.expectOneOf(RIGHT_BRACKET, COLON, ctx)
if itm.typ == RIGHT_BRACKET {
if itm.Typ == RIGHT_BRACKET {
// Range selector.
vs, ok := expr.(*VectorSelector)
if !ok {
@ -606,8 +606,8 @@ func (p *parser) subqueryOrRangeSelector(expr Expr, checkRange bool) Expr {
var estep time.Duration
itm = p.expectOneOf(RIGHT_BRACKET, DURATION, ctx)
if itm.typ == DURATION {
estepStr := itm.val
if itm.Typ == DURATION {
estepStr := itm.Val
estep, err = parseDuration(estepStr)
if err != nil {
p.error(err)
@ -641,29 +641,29 @@ func (p *parser) number(val string) float64 {
//
func (p *parser) primaryExpr() Expr {
switch t := p.next(); {
case t.typ == NUMBER:
f := p.number(t.val)
case t.Typ == NUMBER:
f := p.number(t.Val)
return &NumberLiteral{f}
case t.typ == STRING:
return &StringLiteral{p.unquoteString(t.val)}
case t.Typ == STRING:
return &StringLiteral{p.unquoteString(t.Val)}
case t.typ == LEFT_BRACE:
case t.Typ == LEFT_BRACE:
// Metric selector without metric name.
p.backup()
return p.VectorSelector("")
case t.typ == IDENTIFIER:
case t.Typ == IDENTIFIER:
// Check for function call.
if p.peek().typ == LEFT_PAREN {
return p.call(t.val)
if p.peek().Typ == LEFT_PAREN {
return p.call(t.Val)
}
fallthrough // Else metric selector.
case t.typ == METRIC_IDENTIFIER:
return p.VectorSelector(t.val)
case t.Typ == METRIC_IDENTIFIER:
return p.VectorSelector(t.Val)
case t.typ.isAggregator():
case t.Typ.isAggregator():
p.backup()
return p.aggrExpr()
@ -683,15 +683,15 @@ func (p *parser) labels() []string {
p.expect(LEFT_PAREN, ctx)
labels := []string{}
if p.peek().typ != RIGHT_PAREN {
if p.peek().Typ != RIGHT_PAREN {
for {
id := p.next()
if !isLabel(id.val) {
if !isLabel(id.Val) {
p.errorf("unexpected %s in %s, expected label", id.desc(), ctx)
}
labels = append(labels, id.val)
labels = append(labels, id.Val)
if p.peek().typ != COMMA {
if p.peek().Typ != COMMA {
break
}
p.next()
@ -711,7 +711,7 @@ func (p *parser) aggrExpr() *AggregateExpr {
const ctx = "aggregation"
agop := p.next()
if !agop.typ.isAggregator() {
if !agop.Typ.isAggregator() {
p.errorf("expected aggregation operator but got %s", agop)
}
var grouping []string
@ -719,7 +719,7 @@ func (p *parser) aggrExpr() *AggregateExpr {
modifiersFirst := false
if t := p.peek().typ; t == BY || t == WITHOUT {
if t := p.peek().Typ; t == BY || t == WITHOUT {
if t == WITHOUT {
without = true
}
@ -730,7 +730,7 @@ func (p *parser) aggrExpr() *AggregateExpr {
p.expect(LEFT_PAREN, ctx)
var param Expr
if agop.typ.isAggregatorWithParam() {
if agop.Typ.isAggregatorWithParam() {
param = p.expr()
p.expect(COMMA, ctx)
}
@ -738,7 +738,7 @@ func (p *parser) aggrExpr() *AggregateExpr {
p.expect(RIGHT_PAREN, ctx)
if !modifiersFirst {
if t := p.peek().typ; t == BY || t == WITHOUT {
if t := p.peek().Typ; t == BY || t == WITHOUT {
if len(grouping) > 0 {
p.errorf("aggregation must only contain one grouping clause")
}
@ -751,7 +751,7 @@ func (p *parser) aggrExpr() *AggregateExpr {
}
return &AggregateExpr{
Op: agop.typ,
Op: agop.Typ,
Expr: e,
Param: param,
Grouping: grouping,
@ -773,7 +773,7 @@ func (p *parser) call(name string) *Call {
p.expect(LEFT_PAREN, ctx)
// Might be call without args.
if p.peek().typ == RIGHT_PAREN {
if p.peek().Typ == RIGHT_PAREN {
p.next() // Consume.
return &Call{fn, nil}
}
@ -784,7 +784,7 @@ func (p *parser) call(name string) *Call {
args = append(args, e)
// Terminate if no more arguments.
if p.peek().typ != COMMA {
if p.peek().Typ != COMMA {
break
}
p.next()
@ -820,7 +820,7 @@ func (p *parser) labelMatchers(operators ...ItemType) []*labels.Matcher {
p.expect(LEFT_BRACE, ctx)
// Check if no matchers are provided.
if p.peek().typ == RIGHT_BRACE {
if p.peek().Typ == RIGHT_BRACE {
p.next()
return matchers
}
@ -828,7 +828,7 @@ func (p *parser) labelMatchers(operators ...ItemType) []*labels.Matcher {
for {
label := p.expect(IDENTIFIER, ctx)
op := p.next().typ
op := p.next().Typ
if !op.isOperator() {
p.errorf("expected label matching operator but got %s", op)
}
@ -842,9 +842,9 @@ func (p *parser) labelMatchers(operators ...ItemType) []*labels.Matcher {
p.errorf("operator must be one of %q, is %q", operators, op)
}
val := p.unquoteString(p.expect(STRING, ctx).val)
val := p.unquoteString(p.expect(STRING, ctx).Val)
// Map the item to the respective match type.
// Map the Item to the respective match type.
var matchType labels.MatchType
switch op {
case EQL:
@ -856,28 +856,28 @@ func (p *parser) labelMatchers(operators ...ItemType) []*labels.Matcher {
case NEQ_REGEX:
matchType = labels.MatchNotRegexp
default:
p.errorf("item %q is not a metric match type", op)
p.errorf("Item %q is not a metric match type", op)
}
m, err := labels.NewMatcher(matchType, label.val, val)
m, err := labels.NewMatcher(matchType, label.Val, val)
if err != nil {
p.error(err)
}
matchers = append(matchers, m)
if p.peek().typ == IDENTIFIER {
p.errorf("missing comma before next identifier %q", p.peek().val)
if p.peek().Typ == IDENTIFIER {
p.errorf("missing comma before next identifier %q", p.peek().Val)
}
// Terminate list if last matcher.
if p.peek().typ != COMMA {
if p.peek().Typ != COMMA {
break
}
p.next()
// Allow comma after each item in a multi-line listing.
if p.peek().typ == RIGHT_BRACE {
// Allow comma after each Item in a multi-line listing.
if p.peek().Typ == RIGHT_BRACE {
break
}
}
@ -896,10 +896,10 @@ func (p *parser) metric() labels.Labels {
name := ""
var m labels.Labels
t := p.peek().typ
t := p.peek().Typ
if t == IDENTIFIER || t == METRIC_IDENTIFIER {
name = p.next().val
t = p.peek().typ
name = p.next().Val
t = p.peek().Typ
}
if t != LEFT_BRACE && name == "" {
p.errorf("missing metric name or metric selector")
@ -924,7 +924,7 @@ func (p *parser) offset() time.Duration {
p.next()
offi := p.expect(DURATION, ctx)
offset, err := parseDuration(offi.val)
offset, err := parseDuration(offi.Val)
if err != nil {
p.error(err)
}
@ -942,7 +942,7 @@ func (p *parser) VectorSelector(name string) *VectorSelector {
Name: name,
}
// Parse label matching if any.
if t := p.peek(); t.typ == LEFT_BRACE {
if t := p.peek(); t.Typ == LEFT_BRACE {
p.generatedParserResult = ret
p.parseGenerated(START_LABELS, []ItemType{RIGHT_BRACE, EOF})
@ -1152,11 +1152,11 @@ func (p *parser) parseGenerated(startSymbol ItemType, switchSymbols []ItemType)
}
func (p *parser) newLabelMatcher(label item, operator item, value item) *labels.Matcher {
op := operator.typ
val := p.unquoteString(value.val)
func (p *parser) newLabelMatcher(label Item, operator Item, value Item) *labels.Matcher {
op := operator.Typ
val := p.unquoteString(value.Val)
// Map the item to the respective match type.
// Map the Item to the respective match type.
var matchType labels.MatchType
switch op {
case EQL:
@ -1173,7 +1173,7 @@ func (p *parser) newLabelMatcher(label item, operator item, value item) *labels.
panic("invalid operator")
}
m, err := labels.NewMatcher(matchType, label.val, val)
m, err := labels.NewMatcher(matchType, label.Val, val)
if err != nil {
p.error(err)
}