diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go
index b7ad1dd85..5f0415d3e 100644
--- a/model/textparse/openmetricsparse.go
+++ b/model/textparse/openmetricsparse.go
@@ -94,16 +94,46 @@ type OpenMetricsParser struct {
 	exemplarVal   float64
 	exemplarTs    int64
 	hasExemplarTs bool
+
+	skipCTSeries bool
 }
 
-// NewOpenMetricsParser returns a new parser of the byte slice.
-func NewOpenMetricsParser(b []byte, st *labels.SymbolTable) Parser {
-	return &OpenMetricsParser{
-		l:       &openMetricsLexer{b: b},
-		builder: labels.NewScratchBuilderWithSymbolTable(st, 16),
+type openMetricsParserOptions struct {
+	SkipCTSeries bool
+}
+
+type OpenMetricsOption func(*openMetricsParserOptions)
+
+// WithOMParserCTSeriesSkipped turns off exposing _created lines
+// as series, so that they are only used to provide the created timestamp
+// for the `CreatedTimestamp` method.
+//
+// It's recommended to use this option to avoid using _created lines for
+// anything other than the created timestamp, but it is left off by default
+// for best-effort compatibility.
+func WithOMParserCTSeriesSkipped() OpenMetricsOption {
+	return func(o *openMetricsParserOptions) {
+		o.SkipCTSeries = true
 	}
 }
 
+// NewOpenMetricsParser returns a new parser for the byte slice with an option to skip CT series parsing.
+func NewOpenMetricsParser(b []byte, st *labels.SymbolTable, opts ...OpenMetricsOption) Parser {
+	options := &openMetricsParserOptions{}
+
+	for _, opt := range opts {
+		opt(options)
+	}
+
+	parser := &OpenMetricsParser{
+		l:            &openMetricsLexer{b: b},
+		builder:      labels.NewScratchBuilderWithSymbolTable(st, 16),
+		skipCTSeries: options.SkipCTSeries,
+	}
+
+	return parser
+}
+
 // Series returns the bytes of the series, the timestamp if set, and the value
 // of the current sample.
 func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) {
@@ -219,10 +249,90 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool {
 	return true
 }
 
-// CreatedTimestamp returns nil as it's not implemented yet.
-// TODO(bwplotka): https://github.com/prometheus/prometheus/issues/12980
+// CreatedTimestamp returns the created timestamp for the current Metric if it exists, or nil otherwise.
+// NOTE(Maniktherana): Might use additional CPU/mem resources due to the deep copy of the parser required for peeking, given the OM 1.0 specification on _created series.
 func (p *OpenMetricsParser) CreatedTimestamp() *int64 {
-	return nil
+	if !TypeRequiresCT(p.mtype) {
+		// Not a CT-supported metric type, fast path.
+		return nil
+	}
+
+	var (
+		currLset                labels.Labels
+		buf                     []byte
+		peekWithoutNameLsetHash uint64
+	)
+	p.Metric(&currLset)
+	currFamilyLsetHash, buf := currLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile")
+	// Search for the _created line for the currFamilyLsetHash using an ephemeral parser until
+	// we see EOF or a new metric family. We have to do it this way as we don't know where (and if)
+	// that CT line is.
+	// TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
+	peek := deepCopy(p)
+	for {
+		eType, err := peek.Next()
+		if err != nil {
+			// This means peek will give an error later on too, so definitely no CT line was found.
+			// This might result in a partial scrape with a wrong/missing CT, but only a
+			// spec improvement would help.
+			// TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
+			return nil
+		}
+		if eType != EntrySeries {
+			// Assume we hit a different family, no CT line found.
+			return nil
+		}
+
+		var peekedLset labels.Labels
+		peek.Metric(&peekedLset)
+		peekedName := peekedLset.Get(model.MetricNameLabel)
+		if !strings.HasSuffix(peekedName, "_created") {
+			// Not a CT line, keep searching.
+			continue
+		}
+
+		// We got a CT line here, but let's check whether the CT line is actually for our series (edge case).
+		peekWithoutNameLsetHash, _ = peekedLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile")
+		if peekWithoutNameLsetHash != currFamilyLsetHash {
+			// CT line for a different series, so our series has no CT.
+			return nil
+		}
+		ct := int64(peek.val)
+		return &ct
+	}
+}
+
+// TypeRequiresCT returns true if the metric type requires a _created timestamp.
+func TypeRequiresCT(t model.MetricType) bool {
+	switch t {
+	case model.MetricTypeCounter, model.MetricTypeSummary, model.MetricTypeHistogram:
+		return true
+	default:
+		return false
+	}
+}
+
+// deepCopy creates a copy of a parser without re-using the slices' original memory addresses.
+func deepCopy(p *OpenMetricsParser) OpenMetricsParser {
+	newB := make([]byte, len(p.l.b))
+	copy(newB, p.l.b)
+
+	newLexer := &openMetricsLexer{
+		b:     newB,
+		i:     p.l.i,
+		start: p.l.start,
+		err:   p.l.err,
+		state: p.l.state,
+	}
+
+	newParser := OpenMetricsParser{
+		l:            newLexer,
+		builder:      p.builder,
+		mtype:        p.mtype,
+		val:          p.val,
+		skipCTSeries: false,
+	}
+	return newParser
 }
 
 // nextToken returns the next token from the openMetricsLexer.
@@ -337,7 +447,13 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
 		}
 		p.series = p.l.b[p.start:p.l.i]
 
-		return p.parseMetricSuffix(p.nextToken())
+		if err := p.parseSeriesEndOfLine(p.nextToken()); err != nil {
+			return EntryInvalid, err
+		}
+		if p.skipCTSeries && p.isCreatedSeries() {
+			return p.Next()
+		}
+		return EntrySeries, nil
 	case tMName:
 		p.offsets = append(p.offsets, p.start, p.l.i)
 		p.series = p.l.b[p.start:p.l.i]
@@ -351,8 +467,14 @@
 			p.series = p.l.b[p.start:p.l.i]
 			t2 = p.nextToken()
 		}
-		return p.parseMetricSuffix(t2)
+		if err := p.parseSeriesEndOfLine(t2); err != nil {
+			return EntryInvalid, err
+		}
+		if p.skipCTSeries && p.isCreatedSeries() {
+			return p.Next()
+		}
+		return EntrySeries, nil
 	default:
 		err = p.parseError("expected a valid start token", t)
 	}
 
@@ -467,51 +589,64 @@ func (p *OpenMetricsParser) parseLVals(offsets []int, isExemplar bool) ([]int, e
 	}
 }
 
-// parseMetricSuffix parses the end of the line after the metric name and
-// labels. It starts parsing with the provided token.
-func (p *OpenMetricsParser) parseMetricSuffix(t token) (Entry, error) {
+// isCreatedSeries returns true if the current series is a _created series.
+func (p *OpenMetricsParser) isCreatedSeries() bool {
+	var newLbs labels.Labels
+	p.Metric(&newLbs)
+	name := newLbs.Get(model.MetricNameLabel)
+	if TypeRequiresCT(p.mtype) && strings.HasSuffix(name, "_created") {
+		return true
+	}
+	return false
+}
+
+// parseSeriesEndOfLine parses the rest of the series line (value, optional
+// timestamp, comment, etc.) after the metric name and labels.
+// It starts parsing with the provided token.
+func (p *OpenMetricsParser) parseSeriesEndOfLine(t token) error {
 	if p.offsets[0] == -1 {
-		return EntryInvalid, fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i])
+		return fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i])
 	}
 
 	var err error
 	p.val, err = p.getFloatValue(t, "metric")
 	if err != nil {
-		return EntryInvalid, err
+		return err
 	}
 
 	p.hasTS = false
 	switch t2 := p.nextToken(); t2 {
 	case tEOF:
-		return EntryInvalid, errors.New("data does not end with # EOF")
+		return errors.New("data does not end with # EOF")
 	case tLinebreak:
 		break
 	case tComment:
 		if err := p.parseComment(); err != nil {
-			return EntryInvalid, err
+			return err
 		}
 	case tTimestamp:
 		p.hasTS = true
 		var ts float64
 		// A float is enough to hold what we need for millisecond resolution.
 		if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil {
-			return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
+			return fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
 		}
 		if math.IsNaN(ts) || math.IsInf(ts, 0) {
-			return EntryInvalid, fmt.Errorf("invalid timestamp %f", ts)
+			return fmt.Errorf("invalid timestamp %f", ts)
 		}
 		p.ts = int64(ts * 1000)
 		switch t3 := p.nextToken(); t3 {
 		case tLinebreak:
 		case tComment:
 			if err := p.parseComment(); err != nil {
-				return EntryInvalid, err
+				return err
 			}
 		default:
-			return EntryInvalid, p.parseError("expected next entry after timestamp", t3)
+			return p.parseError("expected next entry after timestamp", t3)
 		}
 	}
-	return EntrySeries, nil
+
+	return nil
 }
 
 func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error) {
diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go
index bc76a540d..cadaabc99 100644
--- a/model/textparse/openmetricsparse_test.go
+++ b/model/textparse/openmetricsparse_test.go
@@ -14,6 +14,7 @@
 package textparse
 
 import (
+	"errors"
 	"io"
 	"testing"
 
@@ -24,6 +25,8 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 )
 
+func int64p(x int64) *int64 { return &x }
+
 func TestOpenMetricsParse(t *testing.T) {
 	input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary
@@ -63,15 +66,34 @@
 ss{A="a"} 0
 _metric_starting_with_underscore 1
 testmetric{_label_starting_with_underscore="foo"} 1
 testmetric{label="\"bar\""} 1
+# HELP foo Counter with and without labels to certify CT is parsed for both cases
 # TYPE foo counter
-foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
+foo_total 17.0 1520879607.789 # {id="counter-test"} 5
+foo_created 1000
+foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5
+foo_created{a="b"} 1000
+# HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines away
+# TYPE bar summary
+bar_count 17.0
+bar_sum 324789.3
+bar{quantile="0.95"} 123.7
+bar{quantile="0.99"} 150.0
+bar_created 1520430000
+# HELP baz Histogram with the same objective as the summary above
+# TYPE baz histogram
+baz_bucket{le="0.0"} 0
+baz_bucket{le="+Inf"} 17
+baz_count 17
+baz_sum 324789.3
+baz_created 1520430000
+# HELP fizz_created Gauge which shouldn't be parsed as CT
+# TYPE fizz_created gauge
+fizz_created 17.0`
 	input += "\n# HELP metric foo\x00bar"
 	input += "\nnull_byte_metric{a=\"abc\x00\"} 1"
 	input += "\n# EOF\n"
 
-	int64p := func(x int64) *int64 { return &x }
-
 	exp := []expectedParse{
 		{
 			m: "go_gc_duration_seconds",
@@ -216,6 +238,9 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
 			m:    "testmetric{label=\"\\\"bar\\\"\"}",
 			v:    1,
 			lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`),
+		}, {
+			m:    "foo",
+			help: "Counter with and without labels to certify CT is parsed for both cases",
 		}, {
 			m:   "foo",
 			typ: model.MetricTypeCounter,
@@ -225,6 +250,76 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
 			lset: labels.FromStrings("__name__", "foo_total"),
 			t:    int64p(1520879607789),
 			e:    &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
+			ct:   int64p(1000),
+		}, {
+			m:    `foo_total{a="b"}`,
+			v:    17.0,
+			lset: labels.FromStrings("__name__", "foo_total", "a", "b"),
+			t:    int64p(1520879607789),
+			e:    &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
+			ct:   int64p(1000),
+		}, {
+			m:    "bar",
+			help: "Summary with CT at the end, making sure we find CT even if it's multiple lines away",
+		}, {
+			m:   "bar",
+			typ: model.MetricTypeSummary,
+		}, {
+			m:    "bar_count",
+			v:    17.0,
+			lset: labels.FromStrings("__name__", "bar_count"),
+			ct:   int64p(1520430000),
+		}, {
+			m:    "bar_sum",
+			v:    324789.3,
+			lset: labels.FromStrings("__name__", "bar_sum"),
+			ct:   int64p(1520430000),
+		}, {
+			m:    `bar{quantile="0.95"}`,
+			v:    123.7,
+			lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"),
+			ct:   int64p(1520430000),
+		}, {
+			m:    `bar{quantile="0.99"}`,
+			v:    150.0,
+			lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"),
+			ct:   int64p(1520430000),
+		}, {
+			m:    "baz",
+			help: "Histogram with the same objective as the summary above",
+		}, {
+			m:   "baz",
+			typ: model.MetricTypeHistogram,
+		}, {
+			m:    `baz_bucket{le="0.0"}`,
+			v:    0,
+			lset: labels.FromStrings("__name__", "baz_bucket", "le", "0.0"),
+			ct:   int64p(1520430000),
+		}, {
+			m:    `baz_bucket{le="+Inf"}`,
+			v:    17,
+			lset: labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"),
+			ct:   int64p(1520430000),
+		}, {
+			m:    `baz_count`,
+			v:    17,
+			lset: labels.FromStrings("__name__", "baz_count"),
+			ct:   int64p(1520430000),
+		}, {
+			m:    `baz_sum`,
+			v:    324789.3,
+			lset: labels.FromStrings("__name__", "baz_sum"),
+			ct:   int64p(1520430000),
+		}, {
+			m:    "fizz_created",
+			help: "Gauge which shouldn't be parsed as CT",
+		}, {
+			m:   "fizz_created",
+			typ: model.MetricTypeGauge,
+		}, {
+			m:    `fizz_created`,
+			v:    17,
+			lset: labels.FromStrings("__name__", "fizz_created"),
 		}, {
 			m:    "metric",
 			help: "foo\x00bar",
@@ -235,8 +330,8 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
 		},
 	}
 
-	p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable())
-	checkParseResults(t, p, exp)
+	p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
+	checkParseResultsWithCT(t, p, exp, true)
 }
 
 func TestUTF8OpenMetricsParse(t *testing.T) {
@@ -251,6 +346,7 @@ func TestUTF8OpenMetricsParse(t *testing.T) {
 # UNIT "go.gc_duration_seconds" seconds
 {"go.gc_duration_seconds",quantile="0"} 4.9351e-05
 {"go.gc_duration_seconds",quantile="0.25"} 7.424100000000001e-05
+{"go.gc_duration_seconds_created"} 12313
 {"go.gc_duration_seconds",quantile="0.5",a="b"} 8.3835e-05
 {"http.status",q="0.9",a="b"} 8.3835e-05
 {"http.status",q="0.9",a="b"} 8.3835e-05
@@ -274,10 +370,12 @@ func TestUTF8OpenMetricsParse(t *testing.T) {
 			m:    `{"go.gc_duration_seconds",quantile="0"}`,
 			v:    4.9351e-05,
 			lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"),
+			ct:   int64p(12313),
 		}, {
 			m:    `{"go.gc_duration_seconds",quantile="0.25"}`,
 			v:    7.424100000000001e-05,
 			lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.25"),
+			ct:   int64p(12313),
 		}, {
 			m:    `{"go.gc_duration_seconds",quantile="0.5",a="b"}`,
 			v:    8.3835e-05,
@@ -306,8 +404,8 @@ choices}`, "strange©™\n'quoted' \"name\"", "6"),
 		},
 	}
 
-	p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable())
-	checkParseResults(t, p, exp)
+	p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
+	checkParseResultsWithCT(t, p, exp, true)
 }
 
 func TestOpenMetricsParseErrors(t *testing.T) {
@@ -598,10 +696,6 @@ func TestOpenMetricsParseErrors(t *testing.T) {
 			input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 -Inf",
 			err:   `invalid exemplar timestamp -Inf`,
 		},
-		{
-			input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 Inf",
-			err:   `invalid exemplar timestamp +Inf`,
-		},
 	}
 
 	for i, c := range cases {
@@ -684,3 +778,217 @@ func TestOMNullByteHandling(t *testing.T) {
 		require.Equal(t, c.err, err.Error(), "test %d", i)
 	}
 }
+
+// While not desirable, there are cases where CT fails to parse, and
+// these tests show them.
+// TODO(maniktherana): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
+func TestCTParseFailures(t *testing.T) {
+	input := `# HELP something Histogram with _created between sum and buckets
+# TYPE something histogram
+something_count 17
+something_sum 324789.3
+something_created 1520430001
+something_bucket{le="0.0"} 0
+something_bucket{le="+Inf"} 17
+# HELP thing Histogram with _created as the first line
+# TYPE thing histogram
+thing_created 1520430002
+thing_count 17
+thing_sum 324789.3
+thing_bucket{le="0.0"} 0
+thing_bucket{le="+Inf"} 17
+# HELP yum Summary with _created between sum and quantiles
+# TYPE yum summary
+yum_count 17.0
+yum_sum 324789.3
+yum_created 1520430003
+yum{quantile="0.95"} 123.7
+yum{quantile="0.99"} 150.0
+# HELP foobar Summary with _created as the first line
+# TYPE foobar summary
+foobar_created 1520430004
+foobar_count 17.0
+foobar_sum 324789.3
+foobar{quantile="0.95"} 123.7
+foobar{quantile="0.99"} 150.0`
+
+	input += "\n# EOF\n"
+
+	int64p := func(x int64) *int64 { return &x }
+
+	type expectCT struct {
+		m     string
+		ct    *int64
+		typ   model.MetricType
+		help  string
+		isErr bool
+	}
+
+	exp := []expectCT{
+		{
+			m:     "something",
+			help:  "Histogram with _created between sum and buckets",
+			isErr: false,
+		}, {
+			m:     "something",
+			typ:   model.MetricTypeHistogram,
+			isErr: false,
+		}, {
+			m:     `something_count`,
+			ct:    int64p(1520430001),
+			isErr: false,
+		}, {
+			m:     `something_sum`,
+			ct:    int64p(1520430001),
+			isErr: false,
+		}, {
+			m:     `something_bucket{le="0.0"}`,
+			ct:    int64p(1520430001),
+			isErr: true,
+		}, {
+			m:     `something_bucket{le="+Inf"}`,
+			ct:    int64p(1520430001),
+			isErr: true,
+		}, {
+			m:     "thing",
+			help:  "Histogram with _created as the first line",
+			isErr: false,
+		}, {
+			m:     "thing",
+			typ:   model.MetricTypeHistogram,
+			isErr: false,
+		}, {
+			m:     `thing_count`,
+			ct:    int64p(1520430002),
+			isErr: true,
+		}, {
+			m:     `thing_sum`,
+			ct:    int64p(1520430002),
+			isErr: true,
+		}, {
+			m:     `thing_bucket{le="0.0"}`,
+			ct:    int64p(1520430002),
+			isErr: true,
+		}, {
+			m:     `thing_bucket{le="+Inf"}`,
+			ct:    int64p(1520430002),
+			isErr: true,
+		}, {
+			m:     "yum",
+			help:  "Summary with _created between sum and quantiles",
+			isErr: false,
+		}, {
+			m:     "yum",
+			typ:   model.MetricTypeSummary,
+			isErr: false,
+		}, {
+			m:     "yum_count",
+			ct:    int64p(1520430003),
+			isErr: false,
+		}, {
+			m:     "yum_sum",
+			ct:    int64p(1520430003),
+			isErr: false,
+		}, {
+			m:     `yum{quantile="0.95"}`,
+			ct:    int64p(1520430003),
+			isErr: true,
+		}, {
+			m:     `yum{quantile="0.99"}`,
+			ct:    int64p(1520430003),
+			isErr: true,
+		}, {
+			m:     "foobar",
+			help:  "Summary with _created as the first line",
+			isErr: false,
+		}, {
+			m:     "foobar",
+			typ:   model.MetricTypeSummary,
+			isErr: false,
+		}, {
+			m:     "foobar_count",
+			ct:    int64p(1520430004),
+			isErr: true,
+		}, {
+			m:     "foobar_sum",
+			ct:    int64p(1520430004),
+			isErr: true,
+		}, {
+			m:     `foobar{quantile="0.95"}`,
+			ct:    int64p(1520430004),
+			isErr: true,
+		}, {
+			m:     `foobar{quantile="0.99"}`,
+			ct:    int64p(1520430004),
+			isErr: true,
+		},
+	}
+
+	p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
+	i := 0
+
+	var res labels.Labels
+	for {
+		et, err := p.Next()
+		if errors.Is(err, io.EOF) {
+			break
+		}
+		require.NoError(t, err)
+
+		switch et {
+		case EntrySeries:
+			p.Metric(&res)
+
+			if ct := p.CreatedTimestamp(); exp[i].isErr {
+				require.Nil(t, ct)
+			} else {
+				require.Equal(t, *exp[i].ct, *ct)
+			}
+		default:
+			i++
+			continue
+		}
+		i++
+	}
+}
+
+func TestDeepCopy(t *testing.T) {
+	input := []byte(`# HELP go_goroutines A gauge of goroutines.
+# TYPE go_goroutines gauge
+go_goroutines 33 123.123
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds
+go_gc_duration_seconds_created`)
+
+	st := labels.NewSymbolTable()
+	parser := NewOpenMetricsParser(input, st, WithOMParserCTSeriesSkipped()).(*OpenMetricsParser)
+
+	// Modify the original parser state
+	_, err := parser.Next()
+	require.NoError(t, err)
+	require.Equal(t, "go_goroutines", string(parser.l.b[parser.offsets[0]:parser.offsets[1]]))
+	require.True(t, parser.skipCTSeries)
+
+	// Create a deep copy of the parser
+	copyParser := deepCopy(parser)
+	etype, err := copyParser.Next()
+	require.NoError(t, err)
+	require.Equal(t, EntryType, etype)
+	require.True(t, parser.skipCTSeries)
+	require.False(t, copyParser.skipCTSeries)
+
+	// Modify the original parser further
+	parser.Next()
+	parser.Next()
+	parser.Next()
+	require.Equal(t, "go_gc_duration_seconds", string(parser.l.b[parser.offsets[0]:parser.offsets[1]]))
+	require.Equal(t, "summary", string(parser.mtype))
+	require.False(t, copyParser.skipCTSeries)
+	require.True(t, parser.skipCTSeries)
+
+	// Ensure the copy remains unchanged
+	copyParser.Next()
+	copyParser.Next()
+	require.Equal(t, "go_gc_duration_seconds", string(copyParser.l.b[copyParser.offsets[0]:copyParser.offsets[1]]))
+	require.False(t, copyParser.skipCTSeries)
+}
diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go
index 66986291d..7971d23b7 100644
--- a/model/textparse/promparse_test.go
+++ b/model/textparse/promparse_test.go
@@ -18,6 +18,7 @@ import (
 	"errors"
 	"io"
 	"os"
+	"strings"
 	"testing"
 
 	"github.com/klauspost/compress/gzip"
@@ -41,6 +42,7 @@ type expectedParse struct {
 	unit    string
 	comment string
 	e       *exemplar.Exemplar
+	ct      *int64
 }
 
 func TestPromParse(t *testing.T) {
@@ -188,6 +190,10 @@ testmetric{label="\"bar\""} 1`
 }
 
 func checkParseResults(t *testing.T, p Parser, exp []expectedParse) {
+	checkParseResultsWithCT(t, p, exp, false)
+}
+
+func checkParseResultsWithCT(t *testing.T, p Parser, exp []expectedParse, ctLinesRemoved bool) {
 	i := 0
 
 	var res labels.Labels
@@ -205,6 +211,14 @@ func checkParseResults(t *testing.T, p Parser, exp []expectedParse) {
 
 			p.Metric(&res)
 
+			if ctLinesRemoved {
+				// Are CT series skipped?
+				_, typ := p.Type()
+				if TypeRequiresCT(typ) && strings.HasSuffix(res.Get(labels.MetricName), "_created") {
+					t.Fatalf("expected created lines to be skipped")
+				}
+			}
+
 			require.Equal(t, exp[i].m, string(m))
 			require.Equal(t, exp[i].t, ts)
 			require.Equal(t, exp[i].v, v)
@@ -218,6 +232,11 @@ func checkParseResults(t *testing.T, p Parser, exp []expectedParse) {
 				require.True(t, found)
 				testutil.RequireEqual(t, *exp[i].e, e)
 			}
+			if ct := p.CreatedTimestamp(); ct != nil {
+				require.Equal(t, *exp[i].ct, *ct)
+			} else {
+				require.Nil(t, exp[i].ct)
+			}
 
 		case EntryType:
 			m, typ := p.Type()
@@ -475,8 +494,10 @@ const (
 
 func BenchmarkParse(b *testing.B) {
 	for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{
-		"prometheus":  NewPromParser,
-		"openmetrics": NewOpenMetricsParser,
+		"prometheus": NewPromParser,
+		"openmetrics": func(b []byte, st *labels.SymbolTable) Parser {
+			return NewOpenMetricsParser(b, st)
+		},
 	} {
 		for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} {
 			f, err := os.Open(fn)
diff --git a/promql/fuzz.go b/promql/fuzz.go
index 3fd50b949..5f08e6a72 100644
--- a/promql/fuzz.go
+++ b/promql/fuzz.go
@@ -68,6 +68,10 @@ func fuzzParseMetricWithContentType(in []byte, contentType string) int {
 		panic(warning)
 	}
 
+	if contentType == "application/openmetrics-text" {
+		p = textparse.NewOpenMetricsParser(in, symbolTable)
+	}
+
 	var err error
 	for {
 		_, err = p.Next()
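Usage sketch (not part of this patch): a minimal example of how the new functional option and the CreatedTimestamp accessor introduced above are meant to work together. The sample OpenMetrics input and the main wrapper are illustrative only; the parser API calls are the ones shown in the diff.

package main

import (
	"errors"
	"fmt"
	"io"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/textparse"
)

func main() {
	input := []byte("# TYPE foo counter\nfoo_total 17.0\nfoo_created 1000\n# EOF\n")

	// Skip _created lines as series; they are then only consumed via CreatedTimestamp().
	p := textparse.NewOpenMetricsParser(input, labels.NewSymbolTable(), textparse.WithOMParserCTSeriesSkipped())
	for {
		et, err := p.Next()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			panic(err)
		}
		if et != textparse.EntrySeries {
			continue
		}

		var lset labels.Labels
		p.Metric(&lset)
		_, _, v := p.Series()

		// CreatedTimestamp peeks ahead for a matching _created line of the same family.
		if ct := p.CreatedTimestamp(); ct != nil {
			fmt.Printf("%s value=%v created_timestamp=%d\n", lset.String(), v, *ct)
		} else {
			fmt.Printf("%s value=%v (no created timestamp)\n", lset.String(), v)
		}
	}
}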