// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package parser

import (
	"fmt"
	"strings"
	"unicode"
	"unicode/utf8"

	"github.com/prometheus/prometheus/promql/parser/posrange"
)

// Item represents a token or text string returned from the scanner.
type Item struct {
	Typ ItemType     // The type of this Item.
	Pos posrange.Pos // The starting position, in bytes, of this Item in the input string.
	Val string       // The value of this Item.
}

// String returns a descriptive string for the Item.
func (i Item) String() string {
	switch {
	case i.Typ == EOF:
		return "EOF"
	case i.Typ == ERROR:
		return i.Val
	case i.Typ == IDENTIFIER || i.Typ == METRIC_IDENTIFIER:
		return fmt.Sprintf("%q", i.Val)
	case i.Typ.IsKeyword():
		return fmt.Sprintf("<%s>", i.Val)
	case i.Typ.IsOperator():
		return fmt.Sprintf("<op:%s>", i.Val)
	case i.Typ.IsAggregator():
		return fmt.Sprintf("<aggr:%s>", i.Val)
	case len(i.Val) > 10:
		return fmt.Sprintf("%.10q...", i.Val)
	}
	return fmt.Sprintf("%q", i.Val)
}

// Pretty returns the prettified form of an item.
// This is the same as the item's stringified format.
func (i Item) Pretty(int) string { return i.String() }

// IsOperator returns true if the Item corresponds to an arithmetic or set operator.
// Returns false otherwise.
func (i ItemType) IsOperator() bool { return i > operatorsStart && i < operatorsEnd }

// IsAggregator returns true if the Item belongs to the aggregator functions.
// Returns false otherwise.
func (i ItemType) IsAggregator() bool { return i > aggregatorsStart && i < aggregatorsEnd }

// IsAggregatorWithParam returns true if the Item is an aggregator that takes a parameter.
// Returns false otherwise.
func (i ItemType) IsAggregatorWithParam() bool {
	return i == TOPK || i == BOTTOMK || i == COUNT_VALUES || i == QUANTILE
}

// IsKeyword returns true if the Item corresponds to a keyword.
// Returns false otherwise.
func (i ItemType) IsKeyword() bool { return i > keywordsStart && i < keywordsEnd }

// IsComparisonOperator returns true if the Item corresponds to a comparison operator.
// Returns false otherwise.
func (i ItemType) IsComparisonOperator() bool {
	switch i {
	case EQLC, NEQ, LTE, LSS, GTE, GTR:
		return true
	default:
		return false
	}
}

// IsSetOperator returns whether the Item corresponds to a set operator.
func (i ItemType) IsSetOperator() bool {
	switch i {
	case LAND, LOR, LUNLESS:
		return true
	}
	return false
}

type ItemType int

// This is a list of all keywords in PromQL.
// When changing this list, make sure to also change
// the maybe_label grammar rule in the generated parser
// to avoid misinterpretation of labels as keywords.
var key = map[string]ItemType{
	// Operators.
	"and":    LAND,
	"or":     LOR,
	"unless": LUNLESS,
	"atan2":  ATAN2,

	// Aggregators.
	"sum":          SUM,
	"avg":          AVG,
	"count":        COUNT,
	"min":          MIN,
	"max":          MAX,
	"group":        GROUP,
	"stddev":       STDDEV,
	"stdvar":       STDVAR,
	"topk":         TOPK,
	"bottomk":      BOTTOMK,
	"count_values": COUNT_VALUES,
	"quantile":     QUANTILE,

	// Keywords.
	"offset":      OFFSET,
	"by":          BY,
	"without":     WITHOUT,
	"on":          ON,
	"ignoring":    IGNORING,
	"group_left":  GROUP_LEFT,
	"group_right": GROUP_RIGHT,
	"bool":        BOOL,

	// Preprocessors.
	"start": START,
	"end":   END,
}

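// histogramDesc maps the descriptor strings that may appear in a native
// histogram series description to their token types.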
var histogramDesc = map[string]ItemType{
	"sum":           SUM_DESC,
	"count":         COUNT_DESC,
	"schema":        SCHEMA_DESC,
	"offset":        OFFSET_DESC,
	"n_offset":      NEGATIVE_OFFSET_DESC,
	"buckets":       BUCKETS_DESC,
	"n_buckets":     NEGATIVE_BUCKETS_DESC,
	"z_bucket":      ZERO_BUCKET_DESC,
	"z_bucket_w":    ZERO_BUCKET_WIDTH_DESC,
	"custom_values": CUSTOM_VALUES_DESC,
}

// ItemTypeStr contains the default string representations for common Items. It does not
// imply that those are the only character sequences that can be lexed to such an Item.
var ItemTypeStr = map[ItemType]string{
	OPEN_HIST:     "{{",
	CLOSE_HIST:    "}}",
	LEFT_PAREN:    "(",
	RIGHT_PAREN:   ")",
	LEFT_BRACE:    "{",
	RIGHT_BRACE:   "}",
	LEFT_BRACKET:  "[",
	RIGHT_BRACKET: "]",
	COMMA:         ",",
	EQL:           "=",
	COLON:         ":",
	SEMICOLON:     ";",
	BLANK:         "_",
	TIMES:         "x",
	SPACE:         "<space>",

	SUB:       "-",
	ADD:       "+",
	MUL:       "*",
	MOD:       "%",
	DIV:       "/",
	EQLC:      "==",
	NEQ:       "!=",
	LTE:       "<=",
	LSS:       "<",
	GTE:       ">=",
	GTR:       ">",
	EQL_REGEX: "=~",
	NEQ_REGEX: "!~",
	POW:       "^",
}

func init() {
	// Add keywords to Item type strings.
	for s, ty := range key {
		ItemTypeStr[ty] = s
	}
	// Special numbers.
	key["inf"] = NUMBER
	key["nan"] = NUMBER
}

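// String returns a descriptive string for the ItemType.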
func (i ItemType) String() string {
	if s, ok := ItemTypeStr[i]; ok {
		return s
	}
	return fmt.Sprintf("<Item %d>", i)
}

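// desc returns a human-readable description of the Item.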
func (i Item) desc() string {
	if _, ok := ItemTypeStr[i.Typ]; ok {
		return i.String()
	}
	if i.Typ == EOF {
		return i.Typ.desc()
	}
	return fmt.Sprintf("%s %s", i.Typ.desc(), i)
}

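// desc returns a human-readable description of the ItemType.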
func (i ItemType) desc() string {
	switch i {
	case ERROR:
		return "error"
	case EOF:
		return "end of input"
	case COMMENT:
		return "comment"
	case IDENTIFIER:
		return "identifier"
	case METRIC_IDENTIFIER:
		return "metric identifier"
	case STRING:
		return "string"
	case NUMBER:
		return "number"
	case DURATION:
		return "duration"
	}
	return fmt.Sprintf("%q", i)
}

const eof = -1

// stateFn represents the state of the scanner as a function that returns the next state.
type stateFn func(*Lexer) stateFn

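// histogramState tracks which part of a native histogram description the
// lexer is currently scanning.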
type histogramState int

const (
	histogramStateNone histogramState = iota
	histogramStateOpen
	histogramStateMul
	histogramStateAdd
	histogramStateSub
)

// Lexer holds the state of the scanner.
type Lexer struct {
	input       string       // The string being scanned.
	state       stateFn      // The next lexing function to enter.
	pos         posrange.Pos // Current position in the input.
	start       posrange.Pos // Start position of this Item.
	width       posrange.Pos // Width of last rune read from input.
	lastPos     posrange.Pos // Position of most recent Item returned by NextItem.
	itemp       *Item        // Pointer to where the next scanned item should be placed.
	scannedItem bool         // Set to true every time an item is scanned.

	parenDepth  int  // Nesting depth of ( ) exprs.
	braceOpen   bool // Whether a { is opened.
	bracketOpen bool // Whether a [ is opened.
	gotColon    bool // Whether we got a ':' after [ was opened.
	stringOpen  rune // Quote rune of the string currently being read.

	// Series description variables, used by the internal PromQL testing framework
	// as well as in promtool rules unit tests.
	// See https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/#series
	seriesDesc     bool           // Whether we are lexing a series description.
	histogramState histogramState // Determines whether or not we are inside of a histogram description.
}

// next returns the next rune in the input.
func (l *Lexer) next() rune {
	if int(l.pos) >= len(l.input) {
		l.width = 0
		return eof
	}
	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
	l.width = posrange.Pos(w)
	l.pos += l.width
	return r
}

// peek returns but does not consume the next rune in the input.
func (l *Lexer) peek() rune {
	r := l.next()
	l.backup()
	return r
}

// backup steps back one rune. Can only be called once per call of next.
func (l *Lexer) backup() {
	l.pos -= l.width
}

// emit passes an Item back to the client.
func (l *Lexer) emit(t ItemType) {
	*l.itemp = Item{t, l.start, l.input[l.start:l.pos]}
	l.start = l.pos
	l.scannedItem = true
}

// ignore skips over the pending input before this point.
func (l *Lexer) ignore() {
	l.start = l.pos
}

// accept consumes the next rune if it's from the valid set.
func (l *Lexer) accept(valid string) bool {
	if strings.ContainsRune(valid, l.next()) {
		return true
	}
	l.backup()
	return false
}

// is peeks and returns true if the next rune is contained in the provided string.
func (l *Lexer) is(valid string) bool {
	return strings.ContainsRune(valid, l.peek())
}

// acceptRun consumes a run of runes from the valid set.
func (l *Lexer) acceptRun(valid string) {
	for strings.ContainsRune(valid, l.next()) {
		// Consume.
	}
	l.backup()
}

// errorf returns an error token and terminates the scan by passing
// back a nil pointer that will be the next state, terminating l.NextItem.
func (l *Lexer) errorf(format string, args ...interface{}) stateFn {
	*l.itemp = Item{ERROR, l.start, fmt.Sprintf(format, args...)}
	l.scannedItem = true

	return nil
}

// NextItem writes the next item to the provided address.
func (l *Lexer) NextItem(itemp *Item) {
	l.scannedItem = false
	l.itemp = itemp

	if l.state != nil {
		for !l.scannedItem {
			l.state = l.state(l)
		}
	} else {
		l.emit(EOF)
	}

	l.lastPos = l.itemp.Pos
}

// Lex creates a new scanner for the input string.
func Lex(input string) *Lexer {
	l := &Lexer{
		input: input,
		state: lexStatements,
	}
	return l
}

// lineComment is the character that starts a line comment.
const lineComment = "#"

// lexStatements is the top-level state for lexing.
func lexStatements(l *Lexer) stateFn {
	if l.histogramState != histogramStateNone {
		return lexHistogram
	}
	if l.braceOpen {
		return lexInsideBraces
	}
	if strings.HasPrefix(l.input[l.pos:], lineComment) {
		return lexLineComment
	}

	switch r := l.next(); {
	case r == eof:
		switch {
		case l.parenDepth != 0:
			return l.errorf("unclosed left parenthesis")
		case l.bracketOpen:
			return l.errorf("unclosed left bracket")
		}
		l.emit(EOF)
		return nil
	case r == ',':
		l.emit(COMMA)
	case isSpace(r):
		return lexSpace
	case r == '*':
		l.emit(MUL)
	case r == '/':
		l.emit(DIV)
	case r == '%':
		l.emit(MOD)
	case r == '+':
		l.emit(ADD)
	case r == '-':
		l.emit(SUB)
	case r == '^':
		l.emit(POW)
	case r == '=':
		switch t := l.peek(); t {
		case '=':
			l.next()
			l.emit(EQLC)
		case '~':
			return l.errorf("unexpected character after '=': %q", t)
		default:
			l.emit(EQL)
		}
	case r == '!':
		if t := l.next(); t == '=' {
			l.emit(NEQ)
		} else {
			return l.errorf("unexpected character after '!': %q", t)
		}
	case r == '<':
		if t := l.peek(); t == '=' {
			l.next()
			l.emit(LTE)
		} else {
			l.emit(LSS)
		}
	case r == '>':
		if t := l.peek(); t == '=' {
			l.next()
			l.emit(GTE)
		} else {
			l.emit(GTR)
		}
	case isDigit(r) || (r == '.' && isDigit(l.peek())):
		l.backup()
		return lexNumberOrDuration
	case r == '"' || r == '\'':
		l.stringOpen = r
		return lexString
	case r == '`':
		l.stringOpen = r
		return lexRawString
	case isAlpha(r) || r == ':':
		if !l.bracketOpen {
			l.backup()
			return lexKeywordOrIdentifier
		}
		if l.gotColon {
			return l.errorf("unexpected colon %q", r)
		}
		l.emit(COLON)
		l.gotColon = true
	case r == '(':
		l.emit(LEFT_PAREN)
		l.parenDepth++
		return lexStatements
	case r == ')':
		l.emit(RIGHT_PAREN)
		l.parenDepth--
		if l.parenDepth < 0 {
			return l.errorf("unexpected right parenthesis %q", r)
		}
		return lexStatements
	case r == '{':
		l.emit(LEFT_BRACE)
		l.braceOpen = true
		return lexInsideBraces
	case r == '[':
		if l.bracketOpen {
			return l.errorf("unexpected left bracket %q", r)
		}
		l.gotColon = false
		l.emit(LEFT_BRACKET)
		if isSpace(l.peek()) {
			skipSpaces(l)
		}
		l.bracketOpen = true
		return lexDuration
	case r == ']':
		if !l.bracketOpen {
			return l.errorf("unexpected right bracket %q", r)
		}
		l.emit(RIGHT_BRACKET)
		l.bracketOpen = false
	case r == '@':
		l.emit(AT)
	default:
		return l.errorf("unexpected character: %q", r)
	}
	return lexStatements
}

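// lexHistogram scans a native histogram description within {{ }} in a series
// description.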
func lexHistogram(l *Lexer) stateFn {
	switch l.histogramState {
	case histogramStateMul:
		l.histogramState = histogramStateNone
		l.next()
		l.emit(TIMES)
		return lexNumber
	case histogramStateAdd:
		l.histogramState = histogramStateNone
		l.next()
		l.emit(ADD)
		return lexValueSequence
	case histogramStateSub:
		l.histogramState = histogramStateNone
		l.next()
		l.emit(SUB)
		return lexValueSequence
	}

	if l.bracketOpen {
		return lexBuckets
	}
	switch r := l.next(); {
	case isSpace(r):
		l.emit(SPACE)
		return lexSpace
	case isAlpha(r):
		l.backup()
		return lexHistogramDescriptor
	case r == ':':
		l.emit(COLON)
		return lexHistogram
	case r == '-':
		l.emit(SUB)
		return lexHistogram
	case r == 'x':
		l.emit(TIMES)
		return lexNumber
	case isDigit(r):
		l.backup()
		return lexNumber
	case r == '[':
		l.bracketOpen = true
		l.emit(LEFT_BRACKET)
		return lexBuckets
	case r == '}' && l.peek() == '}':
		l.next()
		l.emit(CLOSE_HIST)
		switch l.peek() {
		case 'x':
			l.histogramState = histogramStateMul
			return lexHistogram
		case '+':
			l.histogramState = histogramStateAdd
			return lexHistogram
		case '-':
			l.histogramState = histogramStateSub
			return lexHistogram
		default:
			l.histogramState = histogramStateNone
			return lexValueSequence
		}
	default:
		return l.errorf("histogram description incomplete unexpected: %q", r)
	}
}

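// lexHistogramDescriptor scans a descriptor word (e.g. sum, count, schema)
// inside a histogram description. The next character is known to be a letter.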
func lexHistogramDescriptor(l *Lexer) stateFn {
Loop:
	for {
		switch r := l.next(); {
		case isAlpha(r):
			// absorb.
		default:
			l.backup()

			word := l.input[l.start:l.pos]
			if desc, ok := histogramDesc[strings.ToLower(word)]; ok {
				if l.peek() == ':' {
					l.emit(desc)
					return lexHistogram
				}
				l.errorf("missing `:` for histogram descriptor")
				break Loop
			}
			// Current word is Inf or NaN.
			if desc, ok := key[strings.ToLower(word)]; ok {
				if desc == NUMBER {
					l.emit(desc)
					return lexHistogram
				}
			}
			l.errorf("bad histogram descriptor found: %q", word)
			break Loop
		}
	}
	return lexStatements
}

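// lexBuckets scans the bucket list within the square brackets of a histogram
// description.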
func lexBuckets(l *Lexer) stateFn {
	switch r := l.next(); {
	case isSpace(r):
		l.emit(SPACE)
		return lexSpace
	case isDigit(r):
		l.backup()
		return lexNumber
	case r == ']':
		l.bracketOpen = false
		l.emit(RIGHT_BRACKET)
		return lexHistogram
	default:
		return l.errorf("invalid character in buckets description: %q", r)
	}
}

// lexInsideBraces scans the inside of a vector selector. Keywords are ignored and
// scanned as identifiers.
func lexInsideBraces(l *Lexer) stateFn {
	if strings.HasPrefix(l.input[l.pos:], lineComment) {
		return lexLineComment
	}

	switch r := l.next(); {
	case r == eof:
		return l.errorf("unexpected end of input inside braces")
	case isSpace(r):
		return lexSpace
	case isAlpha(r):
		l.backup()
		return lexIdentifier
	case r == ',':
		l.emit(COMMA)
	case r == '"' || r == '\'':
		l.stringOpen = r
		return lexString
	case r == '`':
		l.stringOpen = r
		return lexRawString
	case r == '=':
		if l.next() == '~' {
			l.emit(EQL_REGEX)
			break
		}
		l.backup()
		l.emit(EQL)
	case r == '!':
		switch nr := l.next(); {
		case nr == '~':
			l.emit(NEQ_REGEX)
		case nr == '=':
			l.emit(NEQ)
		default:
			return l.errorf("unexpected character after '!' inside braces: %q", nr)
		}
	case r == '{':
		return l.errorf("unexpected left brace %q", r)
	case r == '}':
		l.emit(RIGHT_BRACE)
		l.braceOpen = false

		if l.seriesDesc {
			return lexValueSequence
		}
		return lexStatements
	default:
		return l.errorf("unexpected character inside braces: %q", r)
	}
	return lexInsideBraces
}

// lexValueSequence scans a value sequence of a series description.
func lexValueSequence(l *Lexer) stateFn {
	if l.histogramState != histogramStateNone {
		return lexHistogram
	}
	switch r := l.next(); {
	case r == eof:
		return lexStatements
	case r == '{' && l.peek() == '{':
		if l.histogramState != histogramStateNone {
			return l.errorf("unexpected histogram opening {{")
		}
		l.histogramState = histogramStateOpen
		l.next()
		l.emit(OPEN_HIST)
		return lexHistogram
	case isSpace(r):
		l.emit(SPACE)
		lexSpace(l)
	case r == '+':
		l.emit(ADD)
	case r == '-':
		l.emit(SUB)
	case r == 'x':
		l.emit(TIMES)
	case r == '_':
		l.emit(BLANK)
	case isDigit(r) || (r == '.' && isDigit(l.peek())):
		l.backup()
		lexNumber(l)
	case isAlpha(r):
		l.backup()
		// We might lex invalid Items here but this will be caught by the parser.
		return lexKeywordOrIdentifier
	default:
		return l.errorf("unexpected character in series sequence: %q", r)
	}
	return lexValueSequence
}

// lexEscape scans a string escape sequence. The initial escaping character (\)
// has already been seen.
//
// NOTE: This function as well as the helper function digitVal() and associated
// tests have been adapted from the corresponding functions in the "go/scanner"
// package of the Go standard library to work for Prometheus-style strings.
// None of the actual escaping/quoting logic was changed in this function - it
// was only modified to integrate with our lexer.
func lexEscape(l *Lexer) stateFn {
	var n int
	var base, max uint32

	ch := l.next()
	switch ch {
	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', l.stringOpen:
		return lexString
	case '0', '1', '2', '3', '4', '5', '6', '7':
		n, base, max = 3, 8, 255
	case 'x':
		ch = l.next()
		n, base, max = 2, 16, 255
	case 'u':
		ch = l.next()
		n, base, max = 4, 16, unicode.MaxRune
	case 'U':
		ch = l.next()
		n, base, max = 8, 16, unicode.MaxRune
	case eof:
		l.errorf("escape sequence not terminated")
		return lexString
	default:
		l.errorf("unknown escape sequence %#U", ch)
		return lexString
	}

	var x uint32
	for n > 0 {
		d := uint32(digitVal(ch))
		if d >= base {
			if ch == eof {
				l.errorf("escape sequence not terminated")
				return lexString
			}
			l.errorf("illegal character %#U in escape sequence", ch)
			return lexString
		}
		x = x*base + d
		n--

		// Don't seek after last rune.
		if n > 0 {
			ch = l.next()
		}
	}

	if x > max || 0xD800 <= x && x < 0xE000 {
		l.errorf("escape sequence is an invalid Unicode code point")
	}
	return lexString
}

// digitVal returns the digit value of a rune or 16 in case the rune does not
// represent a valid digit.
func digitVal(ch rune) int {
	switch {
	case '0' <= ch && ch <= '9':
		return int(ch - '0')
	case 'a' <= ch && ch <= 'f':
		return int(ch - 'a' + 10)
	case 'A' <= ch && ch <= 'F':
		return int(ch - 'A' + 10)
	}
	return 16 // Larger than any legal digit val.
}

// skipSpaces skips the spaces until a non-space is encountered.
func skipSpaces(l *Lexer) {
	for isSpace(l.peek()) {
		l.next()
	}
	l.ignore()
}

// lexString scans a quoted string. The initial quote has already been seen.
func lexString(l *Lexer) stateFn {
Loop:
	for {
		switch l.next() {
		case '\\':
			return lexEscape
		case utf8.RuneError:
			l.errorf("invalid UTF-8 rune")
			return lexString
		case eof, '\n':
			return l.errorf("unterminated quoted string")
		case l.stringOpen:
			break Loop
		}
	}
	l.emit(STRING)
	return lexStatements
}

// lexRawString scans a raw quoted string. The initial quote has already been seen.
func lexRawString(l *Lexer) stateFn {
Loop:
	for {
		switch l.next() {
		case utf8.RuneError:
			l.errorf("invalid UTF-8 rune")
			return lexRawString
		case eof:
			l.errorf("unterminated raw string")
			return lexRawString
		case l.stringOpen:
			break Loop
		}
	}
	l.emit(STRING)
	return lexStatements
}

// lexSpace scans a run of space characters. One space has already been seen.
func lexSpace(l *Lexer) stateFn {
	for isSpace(l.peek()) {
		l.next()
	}
	l.ignore()
	return lexStatements
}

// lexLineComment scans a line comment. Left comment marker is known to be present.
func lexLineComment(l *Lexer) stateFn {
	l.pos += posrange.Pos(len(lineComment))
	for r := l.next(); !isEndOfLine(r) && r != eof; {
		r = l.next()
	}
	l.backup()
	l.emit(COMMENT)
	return lexStatements
}

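// lexDuration scans a duration inside a left bracket, e.g. the range of a
// matrix selector. The opening bracket has already been consumed.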
func lexDuration(l *Lexer) stateFn {
	if l.scanNumber() {
		return l.errorf("missing unit character in duration")
	}
	if !acceptRemainingDuration(l) {
		return l.errorf("bad duration syntax: %q", l.input[l.start:l.pos])
	}
	l.backup()
	l.emit(DURATION)
	return lexStatements
}

// lexNumber scans a number: decimal, hex, oct or float.
func lexNumber(l *Lexer) stateFn {
	if !l.scanNumber() {
		return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
	}
	l.emit(NUMBER)
	return lexStatements
}

// lexNumberOrDuration scans a number or a duration Item.
func lexNumberOrDuration(l *Lexer) stateFn {
	if l.scanNumber() {
		l.emit(NUMBER)
		return lexStatements
	}
	// Next two chars must be a valid unit and a non-alphanumeric.
	if acceptRemainingDuration(l) {
		l.backup()
		l.emit(DURATION)
		return lexStatements
	}
	return l.errorf("bad number or duration syntax: %q", l.input[l.start:l.pos])
}

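// acceptRemainingDuration consumes the unit of a duration, and any further
// number/unit pairs, after the leading number has already been scanned. It
// reports whether the consumed text has the shape of a valid duration.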
func acceptRemainingDuration(l *Lexer) bool {
	// Next two chars must be a valid duration.
	if !l.accept("smhdwy") {
		return false
	}
	// Support for ms. Bad units like hs, ys will be caught when we actually
	// parse the duration.
	l.accept("s")
	// Next char can be another number then a unit.
	for l.accept("0123456789") {
		for l.accept("0123456789") {
		}
		// y is no longer in the list as it should always come first in
		// durations.
		if !l.accept("smhdw") {
			return false
		}
		// Support for ms. Bad units like hs, ys will be caught when we actually
		// parse the duration.
		l.accept("s")
	}
	return !isAlphaNumeric(l.next())
}

// scanNumber scans numbers of different formats. The scanned Item is
// not necessarily a valid number. This case is caught by the parser.
func (l *Lexer) scanNumber() bool {
	// Modify the digit pattern if the number is hexadecimal.
	digitPattern := "0123456789"
	// Disallow hexadecimal in series descriptions as the syntax is ambiguous.
	if !l.seriesDesc &&
		l.accept("0") && l.accept("xX") {
		l.accept("_") // e.g., 0X_1FFFP-16 == 0.1249847412109375
		digitPattern = "0123456789abcdefABCDEF"
	}
	const (
		// Define dot, exponent, and underscore patterns.
		dotPattern        = "."
		exponentPattern   = "eE"
		underscorePattern = "_"
		// Anti-patterns are rune sets that cannot follow their respective rune.
		dotAntiPattern        = "_."
		exponentAntiPattern   = "._eE" // and EOL.
		underscoreAntiPattern = "._eE" // and EOL.
	)
	// All numbers follow the prefix: [.][d][d._eE]*
	l.accept(dotPattern)
	l.accept(digitPattern)
	// [d._eE]* hereon.
	dotConsumed := false
	exponentConsumed := false
	for l.is(digitPattern + dotPattern + underscorePattern + exponentPattern) {
		// "." cannot repeat.
		if l.is(dotPattern) {
			if dotConsumed {
				l.accept(dotPattern)
				return false
			}
		}
		// "eE" cannot repeat.
		if l.is(exponentPattern) {
			if exponentConsumed {
				l.accept(exponentPattern)
				return false
			}
		}
		// Handle dots.
		if l.accept(dotPattern) {
			dotConsumed = true
			if l.accept(dotAntiPattern) {
				return false
			}
			// Fractional hexadecimal literals are not allowed.
			if len(digitPattern) > 10 /* 0x[\da-fA-F].[\d]+p[\d] */ {
				return false
			}
			continue
		}
		// Handle exponents.
		if l.accept(exponentPattern) {
			exponentConsumed = true
			l.accept("+-")
			if l.accept(exponentAntiPattern) || l.peek() == eof {
				return false
			}
			continue
		}
		// Handle underscores.
		if l.accept(underscorePattern) {
			if l.accept(underscoreAntiPattern) || l.peek() == eof {
				return false
			}

			continue
		}
		// Handle digits at the end since we already consumed before this loop.
		l.acceptRun(digitPattern)
	}

	// Next thing must not be alphanumeric unless it's the times token
	// for series repetitions.
	if r := l.peek(); (l.seriesDesc && r == 'x') || !isAlphaNumeric(r) {
		return true
	}
	return false
}

// lexIdentifier scans an alphanumeric identifier. The next character
// is known to be a letter.
func lexIdentifier(l *Lexer) stateFn {
	for isAlphaNumeric(l.next()) {
		// absorb
	}
	l.backup()
	l.emit(IDENTIFIER)
	return lexStatements
}

// lexKeywordOrIdentifier scans an alphanumeric identifier which may contain
// a colon rune. If the identifier is a keyword the respective keyword Item
// is scanned.
func lexKeywordOrIdentifier(l *Lexer) stateFn {
Loop:
	for {
		switch r := l.next(); {
		case isAlphaNumeric(r) || r == ':':
			// absorb.
		default:
			l.backup()
			word := l.input[l.start:l.pos]
			switch kw, ok := key[strings.ToLower(word)]; {
			case ok:
				l.emit(kw)
			case !strings.Contains(word, ":"):
				l.emit(IDENTIFIER)
			default:
				l.emit(METRIC_IDENTIFIER)
			}
			break Loop
		}
	}
	if l.seriesDesc && l.peek() != '{' {
		return lexValueSequence
	}
	return lexStatements
}

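// isSpace reports whether r is a space character.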
func isSpace(r rune) bool {
	return r == ' ' || r == '\t' || r == '\n' || r == '\r'
}

// isEndOfLine reports whether r is an end-of-line character.
func isEndOfLine(r rune) bool {
	return r == '\r' || r == '\n'
}

// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
func isAlphaNumeric(r rune) bool {
	return isAlpha(r) || isDigit(r)
}

// isDigit reports whether r is a digit. Note: we cannot use unicode.IsDigit()
// instead because that also classifies non-Latin digits as digits. See
// https://github.com/prometheus/prometheus/issues/939.
func isDigit(r rune) bool {
	return '0' <= r && r <= '9'
}

// isAlpha reports whether r is an alphabetic or underscore.
func isAlpha(r rune) bool {
	return r == '_' || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z')
}

// isLabel reports whether the string can be used as a label.
func isLabel(s string) bool {
	if len(s) == 0 || !isAlpha(rune(s[0])) {
		return false
	}
	for _, c := range s[1:] {
		if !isAlphaNumeric(c) {
			return false
		}
	}
	return true
}