From ac3932ea35a20cf81d385df802176991d4bae867 Mon Sep 17 00:00:00 2001 From: Tobias Guggenmos Date: Thu, 21 Nov 2019 18:43:09 +0000 Subject: [PATCH] Improve PromQL parser performance by making it non-concurrent (#6356) Before this commit, the PromQL parser ran in two goroutines: * The lexer goroutine that splits the input into tokens and sends them over a channel to * the parser goroutine which produces the abstract syntax tree The problem with this approach is that the parser spends more time on goroutine creation and synchronisation than on actual parsing. This commit removes that concurrency and replaces the channel by a slice-based buffer. Benchmarks show that this makes the parser up to 7 times faster than before. Signed-off-by: Tobias Guggenmos --- promql/lex.go | 32 +++++++++++++++++++------------- promql/lex_test.go | 8 ++------ promql/parse_test.go | 3 --- 3 files changed, 21 insertions(+), 22 deletions(-) diff --git a/promql/lex.go b/promql/lex.go index 1c21d23fa6..87e315cc7e 100644 --- a/promql/lex.go +++ b/promql/lex.go @@ -317,13 +317,13 @@ type Pos int // lexer holds the state of the scanner. type lexer struct { - input string // The string being scanned. - state stateFn // The next lexing function to enter. - pos Pos // Current position in the input. - start Pos // Start position of this item. - width Pos // Width of last rune read from input. - lastPos Pos // Position of most recent item returned by nextItem. - items chan item // Channel of scanned items. + input string // The string being scanned. + state stateFn // The next lexing function to enter. + pos Pos // Current position in the input. + start Pos // Start position of this item. + width Pos // Width of last rune read from input. + lastPos Pos // Position of most recent item returned by nextItem. + items []item // Slice buffer of scanned items. parenDepth int // Nesting depth of ( ) exprs. braceOpen bool // Whether a { is opened. 
@@ -362,7 +362,7 @@ func (l *lexer) backup() { // emit passes an item back to the client. func (l *lexer) emit(t ItemType) { - l.items <- item{t, l.start, l.input[l.start:l.pos]} + l.items = append(l.items, item{t, l.start, l.input[l.start:l.pos]}) l.start = l.pos } @@ -408,13 +408,21 @@ func (l *lexer) linePosition() int { // errorf returns an error token and terminates the scan by passing // back a nil pointer that will be the next state, terminating l.nextItem. func (l *lexer) errorf(format string, args ...interface{}) stateFn { - l.items <- item{ItemError, l.start, fmt.Sprintf(format, args...)} + l.items = append(l.items, item{ItemError, l.start, fmt.Sprintf(format, args...)}) return nil } // nextItem returns the next item from the input. func (l *lexer) nextItem() item { - item := <-l.items + for len(l.items) == 0 { + if l.state != nil { + l.state = l.state(l) + } else { + l.emit(ItemEOF) + } + } + item := l.items[0] + l.items = l.items[1:] l.lastPos = item.pos return item } @@ -423,9 +431,8 @@ func (l *lexer) nextItem() item { func lex(input string) *lexer { l := &lexer{ input: input, - items: make(chan item), + state: lexStatements, } - go l.run() return l } @@ -434,7 +441,6 @@ func (l *lexer) run() { for l.state = lexStatements; l.state != nil; { l.state = l.state(l) } - close(l.items) } // Release resources used by lexer. 
diff --git a/promql/lex_test.go b/promql/lex_test.go index 7b4b4a219e..ad1ea8ac5c 100644 --- a/promql/lex_test.go +++ b/promql/lex_test.go @@ -695,15 +695,11 @@ func TestLexer(t *testing.T) { for i, test := range typ.tests { l := &lexer{ input: test.input, - items: make(chan item), seriesDesc: test.seriesDesc, } - go l.run() + l.run() - out := []item{} - for it := range l.items { - out = append(out, it) - } + out := l.items lastItem := out[len(out)-1] if test.fail { diff --git a/promql/parse_test.go b/promql/parse_test.go index f303eb4089..8152be2dd8 100644 --- a/promql/parse_test.go +++ b/promql/parse_test.go @@ -1741,9 +1741,6 @@ func TestRecoverParserRuntime(t *testing.T) { defer func() { testutil.Equals(t, err, errUnexpected) - - _, ok := <-p.lex.items - testutil.Assert(t, !ok, "lex.items was not closed") }() defer p.recover(&err) // Cause a runtime panic.