// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:generate go get github.com/cznic/golex
//go:generate golex -o=lex.l.go lex.l
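
// Note: the lexer's Lex method is not defined in this file; it is presumably
// produced from lex.l by the golex go:generate directives above (yielding
// lex.l.go), along with the lexer state constants such as sLValue, sMeta2,
// and sComment that are referenced below.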

// Package textparse contains an efficient parser for the Prometheus text format.
package textparse
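
// For illustration: the input handled by this parser is Prometheus text
// exposition format, roughly of the shape below, with optional HELP/TYPE
// metadata, comments, a label set, and an optional timestamp per sample:
//
//	# HELP http_requests_total The total number of HTTP requests.
//	# TYPE http_requests_total counter
//	http_requests_total{method="post",code="200"} 1027 1395066363000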

import (
	"errors"
	"fmt"
	"io"
	"math"
	"sort"
	"strconv"
	"strings"
	"unicode/utf8"
	"unsafe"

	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/pkg/value"
)

type lexer struct {
	b     []byte
	i     int
	start int
	err   error
	state int
}

type token int

const (
	tInvalid   token = -1
	tEOF       token = 0
	tLinebreak token = iota
	tWhitespace
	tHelp
	tType
	tText
	tComment
	tBlank
	tMName
	tBraceOpen
	tBraceClose
	tLName
	tLValue
	tComma
	tEqual
	tTimestamp
	tValue
)

func (t token) String() string {
	switch t {
	case tInvalid:
		return "INVALID"
	case tEOF:
		return "EOF"
	case tLinebreak:
		return "LINEBREAK"
	case tWhitespace:
		return "WHITESPACE"
	case tHelp:
		return "HELP"
	case tType:
		return "TYPE"
	case tText:
		return "TEXT"
	case tComment:
		return "COMMENT"
	case tBlank:
		return "BLANK"
	case tMName:
		return "MNAME"
	case tBraceOpen:
		return "BOPEN"
	case tBraceClose:
		return "BCLOSE"
	case tLName:
		return "LNAME"
	case tLValue:
		return "LVALUE"
	case tEqual:
		return "EQUAL"
	case tComma:
		return "COMMA"
	case tTimestamp:
		return "TIMESTAMP"
	case tValue:
		return "VALUE"
	}
	return fmt.Sprintf("<invalid: %d>", t)
}

// buf returns the buffer of the current token.
func (l *lexer) buf() []byte {
	return l.b[l.start:l.i]
}

// cur returns the byte at the current position.
func (l *lexer) cur() byte {
	return l.b[l.i]
}

// next advances the lexer to the next character.
func (l *lexer) next() byte {
	l.i++
	if l.i >= len(l.b) {
		l.err = io.EOF
		return byte(tEOF)
	}
	// Lex struggles with null bytes. If we are in a label value or help string, where
	// they are allowed, consume them here immediately.
	for l.b[l.i] == 0 && (l.state == sLValue || l.state == sMeta2 || l.state == sComment) {
		l.i++
	}
	return l.b[l.i]
}

// Error records an error message reported by the lexer.
func (l *lexer) Error(es string) {
	l.err = errors.New(es)
}

// Parser parses samples from a byte slice in the official
// Prometheus text exposition format.
type Parser struct {
	l       *lexer
	series  []byte
	text    []byte
	mtype   MetricType
	val     float64
	ts      int64
	hasTS   bool
	start   int
	offsets []int
}

// New returns a new parser of the byte slice.
func New(b []byte) *Parser {
	return &Parser{l: &lexer{b: append(b, '\n')}}
}
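
// exampleUsage is a minimal usage sketch (a hypothetical helper, for
// illustration only): drive the parser by calling Next until io.EOF and
// switch on the returned Entry to read series samples, metadata, or comments.
func exampleUsage(data []byte) error {
	p := New(data)
	for {
		et, err := p.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		switch et {
		case EntrySeries:
			var lset labels.Labels
			p.Metric(&lset)
			_, _, _ = p.Series() // series bytes, optional timestamp, value
		case EntryType:
			_, _ = p.Type() // metric name, metric type
		case EntryHelp:
			_, _ = p.Help() // metric name, help text
		case EntryComment:
			_ = p.Comment()
		}
	}
}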

// Series returns the bytes of the series, the timestamp if set, and the value
// of the current sample.
func (p *Parser) Series() ([]byte, *int64, float64) {
	if p.hasTS {
		return p.series, &p.ts, p.val
	}
	return p.series, nil, p.val
}

// Help returns the metric name and help text in the current entry.
// Must only be called after Next returned a help entry.
// The returned byte slices become invalid after the next call to Next.
func (p *Parser) Help() ([]byte, []byte) {
	m := p.l.b[p.offsets[0]:p.offsets[1]]

	// Replacer causes allocations. Replace only when necessary.
	if strings.IndexByte(yoloString(p.text), byte('\\')) >= 0 {
		return m, []byte(helpReplacer.Replace(string(p.text)))
	}
	return m, p.text
}

// Type returns the metric name and type in the current entry.
// Must only be called after Next returned a type entry.
// The returned byte slices become invalid after the next call to Next.
func (p *Parser) Type() ([]byte, MetricType) {
	return p.l.b[p.offsets[0]:p.offsets[1]], p.mtype
}

// Comment returns the text of the current comment.
// Must only be called after Next returned a comment entry.
// The returned byte slice becomes invalid after the next call to Next.
func (p *Parser) Comment() []byte {
	return p.text
}

// Metric writes the labels of the current sample into the passed labels.
// It returns the string from which the metric was parsed.
func (p *Parser) Metric(l *labels.Labels) string {
	// Allocate the full immutable string immediately, so we just
	// have to create references on it below.
	s := string(p.series)

	*l = append(*l, labels.Label{
		Name:  labels.MetricName,
		Value: s[:p.offsets[0]-p.start],
	})

	for i := 1; i < len(p.offsets); i += 4 {
		a := p.offsets[i] - p.start
		b := p.offsets[i+1] - p.start
		c := p.offsets[i+2] - p.start
		d := p.offsets[i+3] - p.start

		// Replacer causes allocations. Replace only when necessary.
		if strings.IndexByte(s[c:d], byte('\\')) >= 0 {
			*l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])})
			continue
		}
		*l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]})
	}

	// Sort labels. We can skip the first entry since the metric name is
	// already at the right place.
	sort.Sort((*l)[1:])

	return s
}

// nextToken returns the next token from the lexer. It skips over tabs
// and spaces.
func (p *Parser) nextToken() token {
	for {
		if tok := p.l.Lex(); tok != tWhitespace {
			return tok
		}
	}
}

// Entry represents the type of a parsed entry.
type Entry int

const (
	EntryInvalid Entry = -1
	EntryType    Entry = 0
	EntryHelp    Entry = 1
	EntrySeries  Entry = 2
	EntryComment Entry = 3
)

// MetricType represents metric type values.
type MetricType string

const (
	MetricTypeCounter   = "counter"
	MetricTypeGauge     = "gauge"
	MetricTypeHistogram = "histogram"
	MetricTypeSummary   = "summary"
	MetricTypeUntyped   = "untyped"
)

func parseError(exp string, got token) error {
	return fmt.Errorf("%s, got %q", exp, got)
}

// Next advances the parser to the next sample. It returns (EntryInvalid, io.EOF)
// when no more samples can be read and (EntryInvalid, err) if an error occurred.
func (p *Parser) Next() (Entry, error) {
	var err error

	p.start = p.l.i
	p.offsets = p.offsets[:0]

	switch t := p.nextToken(); t {
	case tEOF:
		return EntryInvalid, io.EOF
	case tLinebreak:
		// Allow full blank lines.
		return p.Next()

	case tHelp, tType:
		switch t := p.nextToken(); t {
		case tMName:
			p.offsets = append(p.offsets, p.l.start, p.l.i)
		default:
			return EntryInvalid, parseError("expected metric name after HELP", t)
		}
		switch t := p.nextToken(); t {
		case tText:
			if len(p.l.buf()) > 1 {
				p.text = p.l.buf()[1:]
			} else {
				p.text = []byte{}
			}
		default:
			return EntryInvalid, parseError("expected text in HELP", t)
		}
		switch t {
		case tType:
			switch s := yoloString(p.text); s {
			case "counter":
				p.mtype = MetricTypeCounter
			case "gauge":
				p.mtype = MetricTypeGauge
			case "histogram":
				p.mtype = MetricTypeHistogram
			case "summary":
				p.mtype = MetricTypeSummary
			case "untyped":
				p.mtype = MetricTypeUntyped
			default:
				return EntryInvalid, fmt.Errorf("invalid metric type %q", s)
			}
		case tHelp:
			if !utf8.Valid(p.text) {
				return EntryInvalid, fmt.Errorf("help text is not a valid utf8 string")
			}
		}
		if t := p.nextToken(); t != tLinebreak {
			return EntryInvalid, parseError("linebreak expected after metadata", t)
		}
		switch t {
		case tHelp:
			return EntryHelp, nil
		case tType:
			return EntryType, nil
		}
	case tComment:
		p.text = p.l.buf()
		if t := p.nextToken(); t != tLinebreak {
			return EntryInvalid, parseError("linebreak expected after comment", t)
		}
		return EntryComment, nil

	case tMName:
		p.offsets = append(p.offsets, p.l.i)
		p.series = p.l.b[p.start:p.l.i]

		t2 := p.nextToken()
		if t2 == tBraceOpen {
			if err := p.parseLVals(); err != nil {
				return EntryInvalid, err
			}
			p.series = p.l.b[p.start:p.l.i]
			t2 = p.nextToken()
		}
		if t2 != tValue {
			return EntryInvalid, parseError("expected value after metric", t2)
		}
		if p.val, err = strconv.ParseFloat(yoloString(p.l.buf()), 64); err != nil {
			return EntryInvalid, err
		}
		// Ensure canonical NaN value.
		if math.IsNaN(p.val) {
			p.val = math.Float64frombits(value.NormalNaN)
		}
		p.hasTS = false
		switch t2 = p.nextToken(); t2 {
		case tLinebreak:
			break
		case tTimestamp:
			p.hasTS = true
			if p.ts, err = strconv.ParseInt(yoloString(p.l.buf()), 10, 64); err != nil {
				return EntryInvalid, err
			}
			if t2 = p.nextToken(); t2 != tLinebreak {
				return EntryInvalid, parseError("expected next entry after timestamp", t2)
			}
		default:
			return EntryInvalid, parseError("expected timestamp or new record", t2)
		}
		return EntrySeries, nil

	default:
		err = fmt.Errorf("%q is not a valid start token", t)
	}
	return EntryInvalid, err
}

// parseLVals parses the label set of a series and records the offsets of each
// label name and (unquoted) label value in p.offsets.
func (p *Parser) parseLVals() error {
	t := p.nextToken()
	for {
		switch t {
		case tBraceClose:
			return nil
		case tLName:
		default:
			return parseError("expected label name", t)
		}
		p.offsets = append(p.offsets, p.l.start, p.l.i)

		if t := p.nextToken(); t != tEqual {
			return parseError("expected equal", t)
		}
		if t := p.nextToken(); t != tLValue {
			return parseError("expected label value", t)
		}
		if !utf8.Valid(p.l.buf()) {
			return fmt.Errorf("invalid UTF-8 label value")
		}

		// The lexer ensures the value string is quoted. Strip first
		// and last character.
		p.offsets = append(p.offsets, p.l.start+1, p.l.i-1)

		// Free trailing commas are allowed.
		if t = p.nextToken(); t == tComma {
			t = p.nextToken()
		}
	}
}
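
// For illustration, a sketch of the offsets convention inferred from the code
// above: for a series entry, offsets[0] marks the end of the metric name in
// l.b, and each label contributes four further entries (name start, name end,
// value start, value end, with the surrounding quotes already stripped).
// Metric rebases these against p.start to slice label strings out of the
// copied series string. For HELP and TYPE entries, offsets instead holds the
// start and end of the metric name, read directly by Help and Type.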

// lvalReplacer unescapes \", \\, and \n sequences in label values.
var lvalReplacer = strings.NewReplacer(
	`\"`, "\"",
	`\\`, "\\",
	`\n`, "\n",
)

// helpReplacer unescapes \\ and \n sequences in help text.
var helpReplacer = strings.NewReplacer(
	`\\`, "\\",
	`\n`, "\n",
)

// yoloString reinterprets a byte slice as a string without copying.
// The result must not be used after the backing slice is mutated or freed.
func yoloString(b []byte) string {
	return *((*string)(unsafe.Pointer(&b)))
}