textparse/scrape: Add option to scrape both classic and native histograms

So far, if a target exposes a histogram with both classic and native
buckets, a native-histogram enabled Prometheus would ignore the
classic buckets. With the new scrape config option
`scrape_classic_histograms` set, both buckets will be ingested,
creating all the series of a classic histogram in parallel to the
native histogram series. For example, a histogram `foo` would create a
native histogram series `foo` and classic series called `foo_sum`,
`foo_count`, and `foo_bucket`.

This feature can be used in a migration strategy from classic to
native histograms, where it is desired to have a transition period
during which both native and classic histograms are present.

Note that two bugs in classic histogram parsing were found and fixed
as a byproduct of testing the new feature:

1. Series created from classic _gauge_ histograms didn't get the
   _sum/_count/_bucket suffix set.
2. Values of classic _float_ histograms weren't parsed properly.

Signed-off-by: beorn7 <beorn@grafana.com>
This commit is contained in:
beorn7 2023-05-11 01:59:21 +02:00
parent bd98fc8c45
commit 9e500345f3
10 changed files with 1148 additions and 486 deletions

View file

@ -146,8 +146,9 @@ var (
// DefaultScrapeConfig is the default scrape configuration. // DefaultScrapeConfig is the default scrape configuration.
DefaultScrapeConfig = ScrapeConfig{ DefaultScrapeConfig = ScrapeConfig{
// ScrapeTimeout and ScrapeInterval default to the // ScrapeTimeout and ScrapeInterval default to the configured
// configured globals. // globals.
ScrapeClassicHistograms: false,
MetricsPath: "/metrics", MetricsPath: "/metrics",
Scheme: "http", Scheme: "http",
HonorLabels: false, HonorLabels: false,
@ -467,6 +468,8 @@ type ScrapeConfig struct {
ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"` ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
// The timeout for scraping targets of this config. // The timeout for scraping targets of this config.
ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"` ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"`
// Whether to scrape a classic histogram that is also exposed as a native histogram.
ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"`
// The HTTP resource path on which to fetch metrics from targets. // The HTTP resource path on which to fetch metrics from targets.
MetricsPath string `yaml:"metrics_path,omitempty"` MetricsPath string `yaml:"metrics_path,omitempty"`
// The URL scheme with which to fetch metrics from targets. // The URL scheme with which to fetch metrics from targets.

View file

@ -134,6 +134,10 @@ job_name: <job_name>
# Per-scrape timeout when scraping this job. # Per-scrape timeout when scraping this job.
[ scrape_timeout: <duration> | default = <global_config.scrape_timeout> ] [ scrape_timeout: <duration> | default = <global_config.scrape_timeout> ]
# Whether to scrape a classic histogram that is also exposed as a native
# histogram (has no effect without --enable-feature=native-histograms).
[ scrape_classic_histograms: <boolean> | default = false ]
# The HTTP resource path on which to fetch metrics from targets. # The HTTP resource path on which to fetch metrics from targets.
[ metrics_path: <path> | default = /metrics ] [ metrics_path: <path> | default = /metrics ]

View file

@ -71,7 +71,7 @@ type Parser interface {
// //
// This function always returns a valid parser, but might additionally // This function always returns a valid parser, but might additionally
// return an error if the content type cannot be parsed. // return an error if the content type cannot be parsed.
func New(b []byte, contentType string) (Parser, error) { func New(b []byte, contentType string, parseClassicHistograms bool) (Parser, error) {
if contentType == "" { if contentType == "" {
return NewPromParser(b), nil return NewPromParser(b), nil
} }
@ -84,7 +84,7 @@ func New(b []byte, contentType string) (Parser, error) {
case "application/openmetrics-text": case "application/openmetrics-text":
return NewOpenMetricsParser(b), nil return NewOpenMetricsParser(b), nil
case "application/vnd.google.protobuf": case "application/vnd.google.protobuf":
return NewProtobufParser(b), nil return NewProtobufParser(b, parseClassicHistograms), nil
default: default:
return NewPromParser(b), nil return NewPromParser(b), nil
} }
@ -100,7 +100,7 @@ const (
EntrySeries Entry = 2 // A series with a simple float64 as value. EntrySeries Entry = 2 // A series with a simple float64 as value.
EntryComment Entry = 3 EntryComment Entry = 3
EntryUnit Entry = 4 EntryUnit Entry = 4
EntryHistogram Entry = 5 // A series with a sparse histogram as a value. EntryHistogram Entry = 5 // A series with a native histogram as a value.
) )
// MetricType represents metric type values. // MetricType represents metric type values.

View file

@ -91,7 +91,7 @@ func TestNewParser(t *testing.T) {
tt := tt // Copy to local variable before going parallel. tt := tt // Copy to local variable before going parallel.
t.Parallel() t.Parallel()
p, err := New([]byte{}, tt.contentType) p, err := New([]byte{}, tt.contentType, false)
tt.validateParser(t, p) tt.validateParser(t, p)
if tt.err == "" { if tt.err == "" {
require.NoError(t, err) require.NoError(t, err)

View file

@ -54,6 +54,8 @@ type ProtobufParser struct {
// quantiles/buckets. // quantiles/buckets.
fieldPos int fieldPos int
fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed. fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed.
redoClassic bool // true after parsing a native histogram if we need to parse it again as a classic histogram.
// state is marked by the entry we are processing. EntryInvalid implies // state is marked by the entry we are processing. EntryInvalid implies
// that we have to decode the next MetricFamily. // that we have to decode the next MetricFamily.
state Entry state Entry
@ -62,17 +64,22 @@ type ProtobufParser struct {
mf *dto.MetricFamily mf *dto.MetricFamily
// Whether to also parse a classic histogram that is also present as a
// native histogram.
parseClassicHistograms bool
// The following are just shenanigans to satisfy the Parser interface. // The following are just shenanigans to satisfy the Parser interface.
metricBytes *bytes.Buffer // A somewhat fluid representation of the current metric. metricBytes *bytes.Buffer // A somewhat fluid representation of the current metric.
} }
// NewProtobufParser returns a parser for the payload in the byte slice. // NewProtobufParser returns a parser for the payload in the byte slice.
func NewProtobufParser(b []byte) Parser { func NewProtobufParser(b []byte, parseClassicHistograms bool) Parser {
return &ProtobufParser{ return &ProtobufParser{
in: b, in: b,
state: EntryInvalid, state: EntryInvalid,
mf: &dto.MetricFamily{}, mf: &dto.MetricFamily{},
metricBytes: &bytes.Buffer{}, metricBytes: &bytes.Buffer{},
parseClassicHistograms: parseClassicHistograms,
} }
} }
@ -106,21 +113,30 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) {
v = s.GetQuantile()[p.fieldPos].GetValue() v = s.GetQuantile()[p.fieldPos].GetValue()
} }
case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
// This should only happen for a legacy histogram. // This should only happen for a classic histogram.
h := m.GetHistogram() h := m.GetHistogram()
switch p.fieldPos { switch p.fieldPos {
case -2: case -2:
v = h.GetSampleCountFloat()
if v == 0 {
v = float64(h.GetSampleCount()) v = float64(h.GetSampleCount())
}
case -1: case -1:
v = h.GetSampleSum() v = h.GetSampleSum()
default: default:
bb := h.GetBucket() bb := h.GetBucket()
if p.fieldPos >= len(bb) { if p.fieldPos >= len(bb) {
v = h.GetSampleCountFloat()
if v == 0 {
v = float64(h.GetSampleCount()) v = float64(h.GetSampleCount())
}
} else { } else {
v = bb[p.fieldPos].GetCumulativeCountFloat()
if v == 0 {
v = float64(bb[p.fieldPos].GetCumulativeCount()) v = float64(bb[p.fieldPos].GetCumulativeCount())
} }
} }
}
default: default:
panic("encountered unexpected metric type, this is a bug") panic("encountered unexpected metric type, this is a bug")
} }
@ -149,6 +165,9 @@ func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *his
ts = m.GetTimestampMs() ts = m.GetTimestampMs()
h = m.GetHistogram() h = m.GetHistogram()
) )
if p.parseClassicHistograms && len(h.GetBucket()) > 0 {
p.redoClassic = true
}
if h.GetSampleCountFloat() > 0 || h.GetZeroCountFloat() > 0 { if h.GetSampleCountFloat() > 0 || h.GetZeroCountFloat() > 0 {
// It is a float histogram. // It is a float histogram.
fh := histogram.FloatHistogram{ fh := histogram.FloatHistogram{
@ -376,6 +395,12 @@ func (p *ProtobufParser) Next() (Entry, error) {
return EntryInvalid, err return EntryInvalid, err
} }
case EntryHistogram, EntrySeries: case EntryHistogram, EntrySeries:
if p.redoClassic {
p.redoClassic = false
p.state = EntrySeries
p.fieldPos = -3
p.fieldsDone = false
}
t := p.mf.GetType() t := p.mf.GetType()
if p.state == EntrySeries && !p.fieldsDone && if p.state == EntrySeries && !p.fieldsDone &&
(t == dto.MetricType_SUMMARY || (t == dto.MetricType_SUMMARY ||
@ -432,7 +457,7 @@ func (p *ProtobufParser) updateMetricBytes() error {
// state. // state.
func (p *ProtobufParser) getMagicName() string { func (p *ProtobufParser) getMagicName() string {
t := p.mf.GetType() t := p.mf.GetType()
if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_SUMMARY) { if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_GAUGE_HISTOGRAM && t != dto.MetricType_SUMMARY) {
return p.mf.GetName() return p.mf.GetName()
} }
if p.fieldPos == -2 { if p.fieldPos == -2 {

View file

@ -30,8 +30,8 @@ import (
dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
) )
func TestProtobufParse(t *testing.T) { func createTestProtoBuf(t *testing.T) *bytes.Buffer {
textMetricFamilies := []string{ testMetricFamilies := []string{
`name: "go_build_info" `name: "go_build_info"
help: "Build information about the main Go module." help: "Build information about the main Go module."
type: GAUGE type: GAUGE
@ -231,7 +231,6 @@ help: "Test float histogram with many buckets removed to keep it manageable in s
type: HISTOGRAM type: HISTOGRAM
metric: < metric: <
histogram: < histogram: <
sample_count: 175
sample_count_float: 175.0 sample_count_float: 175.0
sample_sum: 0.0008280461746287094 sample_sum: 0.0008280461746287094
bucket: < bucket: <
@ -302,7 +301,6 @@ help: "Like test_float_histogram but as gauge histogram."
type: GAUGE_HISTOGRAM type: GAUGE_HISTOGRAM
metric: < metric: <
histogram: < histogram: <
sample_count: 175
sample_count_float: 175.0 sample_count_float: 175.0
sample_sum: 0.0008280461746287094 sample_sum: 0.0008280461746287094
bucket: < bucket: <
@ -450,9 +448,9 @@ metric: <
} }
varintBuf := make([]byte, binary.MaxVarintLen32) varintBuf := make([]byte, binary.MaxVarintLen32)
inputBuf := &bytes.Buffer{} buf := &bytes.Buffer{}
for _, tmf := range textMetricFamilies { for _, tmf := range testMetricFamilies {
pb := &dto.MetricFamily{} pb := &dto.MetricFamily{}
// From text to proto message. // From text to proto message.
require.NoError(t, proto.UnmarshalText(tmf, pb)) require.NoError(t, proto.UnmarshalText(tmf, pb))
@ -462,11 +460,15 @@ metric: <
// Write first length, then binary protobuf. // Write first length, then binary protobuf.
varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf))) varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf)))
inputBuf.Write(varintBuf[:varintLength]) buf.Write(varintBuf[:varintLength])
inputBuf.Write(protoBuf) buf.Write(protoBuf)
} }
exp := []struct { return buf
}
func TestProtobufParse(t *testing.T) {
type parseResult struct {
lset labels.Labels lset labels.Labels
m string m string
t int64 t int64
@ -478,7 +480,19 @@ metric: <
shs *histogram.Histogram shs *histogram.Histogram
fhs *histogram.FloatHistogram fhs *histogram.FloatHistogram
e []exemplar.Exemplar e []exemplar.Exemplar
}
inputBuf := createTestProtoBuf(t)
scenarios := []struct {
name string
parser Parser
expected []parseResult
}{ }{
{
name: "ignore classic buckets of native histograms",
parser: NewProtobufParser(inputBuf.Bytes(), false),
expected: []parseResult{
{ {
m: "go_build_info", m: "go_build_info",
help: "Build information about the main Go module.", help: "Build information about the main Go module.",
@ -810,12 +824,587 @@ metric: <
"__name__", "without_quantiles_sum", "__name__", "without_quantiles_sum",
), ),
}, },
},
},
{
name: "parse classic and native buckets",
parser: NewProtobufParser(inputBuf.Bytes(), true),
expected: []parseResult{
{ // 0
m: "go_build_info",
help: "Build information about the main Go module.",
},
{ // 1
m: "go_build_info",
typ: MetricTypeGauge,
},
{ // 2
m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)",
v: 1,
lset: labels.FromStrings(
"__name__", "go_build_info",
"checksum", "",
"path", "github.com/prometheus/client_golang",
"version", "(devel)",
),
},
{ // 3
m: "go_memstats_alloc_bytes_total",
help: "Total number of bytes allocated, even if freed.",
},
{ // 4
m: "go_memstats_alloc_bytes_total",
typ: MetricTypeCounter,
},
{ // 5
m: "go_memstats_alloc_bytes_total",
v: 1.546544e+06,
lset: labels.FromStrings(
"__name__", "go_memstats_alloc_bytes_total",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "42"), Value: 12, HasTs: true, Ts: 1625851151233},
},
},
{ // 6
m: "something_untyped",
help: "Just to test the untyped type.",
},
{ // 7
m: "something_untyped",
typ: MetricTypeUnknown,
},
{ // 8
m: "something_untyped",
t: 1234567,
v: 42,
lset: labels.FromStrings(
"__name__", "something_untyped",
),
},
{ // 9
m: "test_histogram",
help: "Test histogram with many buckets removed to keep it manageable in size.",
},
{ // 10
m: "test_histogram",
typ: MetricTypeHistogram,
},
{ // 11
m: "test_histogram",
t: 1234568,
shs: &histogram.Histogram{
Count: 175,
ZeroCount: 2,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []int64{1, 2, -1, -1},
NegativeBuckets: []int64{1, 3, -2, -1, 1},
},
lset: labels.FromStrings(
"__name__", "test_histogram",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
},
},
{ // 12
m: "test_histogram_count",
t: 1234568,
v: 175,
lset: labels.FromStrings(
"__name__", "test_histogram_count",
),
},
{ // 13
m: "test_histogram_sum",
t: 1234568,
v: 0.0008280461746287094,
lset: labels.FromStrings(
"__name__", "test_histogram_sum",
),
},
{ // 14
m: "test_histogram_bucket\xffle\xff-0.0004899999999999998",
t: 1234568,
v: 2,
lset: labels.FromStrings(
"__name__", "test_histogram_bucket",
"le", "-0.0004899999999999998",
),
},
{ // 15
m: "test_histogram_bucket\xffle\xff-0.0003899999999999998",
t: 1234568,
v: 4,
lset: labels.FromStrings(
"__name__", "test_histogram_bucket",
"le", "-0.0003899999999999998",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
},
},
{ // 16
m: "test_histogram_bucket\xffle\xff-0.0002899999999999998",
t: 1234568,
v: 16,
lset: labels.FromStrings(
"__name__", "test_histogram_bucket",
"le", "-0.0002899999999999998",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
},
},
{ // 17
m: "test_histogram_bucket\xffle\xff+Inf",
t: 1234568,
v: 175,
lset: labels.FromStrings(
"__name__", "test_histogram_bucket",
"le", "+Inf",
),
},
{ // 18
m: "test_gauge_histogram",
help: "Like test_histogram but as gauge histogram.",
},
{ // 19
m: "test_gauge_histogram",
typ: MetricTypeGaugeHistogram,
},
{ // 20
m: "test_gauge_histogram",
t: 1234568,
shs: &histogram.Histogram{
CounterResetHint: histogram.GaugeType,
Count: 175,
ZeroCount: 2,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []int64{1, 2, -1, -1},
NegativeBuckets: []int64{1, 3, -2, -1, 1},
},
lset: labels.FromStrings(
"__name__", "test_gauge_histogram",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
},
},
{ // 21
m: "test_gauge_histogram_count",
t: 1234568,
v: 175,
lset: labels.FromStrings(
"__name__", "test_gauge_histogram_count",
),
},
{ // 22
m: "test_gauge_histogram_sum",
t: 1234568,
v: 0.0008280461746287094,
lset: labels.FromStrings(
"__name__", "test_gauge_histogram_sum",
),
},
{ // 23
m: "test_gauge_histogram_bucket\xffle\xff-0.0004899999999999998",
t: 1234568,
v: 2,
lset: labels.FromStrings(
"__name__", "test_gauge_histogram_bucket",
"le", "-0.0004899999999999998",
),
},
{ // 24
m: "test_gauge_histogram_bucket\xffle\xff-0.0003899999999999998",
t: 1234568,
v: 4,
lset: labels.FromStrings(
"__name__", "test_gauge_histogram_bucket",
"le", "-0.0003899999999999998",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
},
},
{ // 25
m: "test_gauge_histogram_bucket\xffle\xff-0.0002899999999999998",
t: 1234568,
v: 16,
lset: labels.FromStrings(
"__name__", "test_gauge_histogram_bucket",
"le", "-0.0002899999999999998",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
},
},
{ // 26
m: "test_gauge_histogram_bucket\xffle\xff+Inf",
t: 1234568,
v: 175,
lset: labels.FromStrings(
"__name__", "test_gauge_histogram_bucket",
"le", "+Inf",
),
},
{ // 27
m: "test_float_histogram",
help: "Test float histogram with many buckets removed to keep it manageable in size.",
},
{ // 28
m: "test_float_histogram",
typ: MetricTypeHistogram,
},
{ // 29
m: "test_float_histogram",
t: 1234568,
fhs: &histogram.FloatHistogram{
Count: 175.0,
ZeroCount: 2.0,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0},
NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0},
},
lset: labels.FromStrings(
"__name__", "test_float_histogram",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
},
},
{ // 30
m: "test_float_histogram_count",
t: 1234568,
v: 175,
lset: labels.FromStrings(
"__name__", "test_float_histogram_count",
),
},
{ // 31
m: "test_float_histogram_sum",
t: 1234568,
v: 0.0008280461746287094,
lset: labels.FromStrings(
"__name__", "test_float_histogram_sum",
),
},
{ // 32
m: "test_float_histogram_bucket\xffle\xff-0.0004899999999999998",
t: 1234568,
v: 2,
lset: labels.FromStrings(
"__name__", "test_float_histogram_bucket",
"le", "-0.0004899999999999998",
),
},
{ // 33
m: "test_float_histogram_bucket\xffle\xff-0.0003899999999999998",
t: 1234568,
v: 4,
lset: labels.FromStrings(
"__name__", "test_float_histogram_bucket",
"le", "-0.0003899999999999998",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
},
},
{ // 34
m: "test_float_histogram_bucket\xffle\xff-0.0002899999999999998",
t: 1234568,
v: 16,
lset: labels.FromStrings(
"__name__", "test_float_histogram_bucket",
"le", "-0.0002899999999999998",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
},
},
{ // 35
m: "test_float_histogram_bucket\xffle\xff+Inf",
t: 1234568,
v: 175,
lset: labels.FromStrings(
"__name__", "test_float_histogram_bucket",
"le", "+Inf",
),
},
{ // 36
m: "test_gauge_float_histogram",
help: "Like test_float_histogram but as gauge histogram.",
},
{ // 37
m: "test_gauge_float_histogram",
typ: MetricTypeGaugeHistogram,
},
{ // 38
m: "test_gauge_float_histogram",
t: 1234568,
fhs: &histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Count: 175.0,
ZeroCount: 2.0,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0},
NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0},
},
lset: labels.FromStrings(
"__name__", "test_gauge_float_histogram",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
},
},
{ // 39
m: "test_gauge_float_histogram_count",
t: 1234568,
v: 175,
lset: labels.FromStrings(
"__name__", "test_gauge_float_histogram_count",
),
},
{ // 40
m: "test_gauge_float_histogram_sum",
t: 1234568,
v: 0.0008280461746287094,
lset: labels.FromStrings(
"__name__", "test_gauge_float_histogram_sum",
),
},
{ // 41
m: "test_gauge_float_histogram_bucket\xffle\xff-0.0004899999999999998",
t: 1234568,
v: 2,
lset: labels.FromStrings(
"__name__", "test_gauge_float_histogram_bucket",
"le", "-0.0004899999999999998",
),
},
{ // 42
m: "test_gauge_float_histogram_bucket\xffle\xff-0.0003899999999999998",
t: 1234568,
v: 4,
lset: labels.FromStrings(
"__name__", "test_gauge_float_histogram_bucket",
"le", "-0.0003899999999999998",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
},
},
{ // 43
m: "test_gauge_float_histogram_bucket\xffle\xff-0.0002899999999999998",
t: 1234568,
v: 16,
lset: labels.FromStrings(
"__name__", "test_gauge_float_histogram_bucket",
"le", "-0.0002899999999999998",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
},
},
{ // 44
m: "test_gauge_float_histogram_bucket\xffle\xff+Inf",
t: 1234568,
v: 175,
lset: labels.FromStrings(
"__name__", "test_gauge_float_histogram_bucket",
"le", "+Inf",
),
},
{ // 45
m: "test_histogram2",
help: "Similar histogram as before but now without sparse buckets.",
},
{ // 46
m: "test_histogram2",
typ: MetricTypeHistogram,
},
{ // 47
m: "test_histogram2_count",
v: 175,
lset: labels.FromStrings(
"__name__", "test_histogram2_count",
),
},
{ // 48
m: "test_histogram2_sum",
v: 0.000828,
lset: labels.FromStrings(
"__name__", "test_histogram2_sum",
),
},
{ // 49
m: "test_histogram2_bucket\xffle\xff-0.00048",
v: 2,
lset: labels.FromStrings(
"__name__", "test_histogram2_bucket",
"le", "-0.00048",
),
},
{ // 50
m: "test_histogram2_bucket\xffle\xff-0.00038",
v: 4,
lset: labels.FromStrings(
"__name__", "test_histogram2_bucket",
"le", "-0.00038",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146},
},
},
{ // 51
m: "test_histogram2_bucket\xffle\xff1.0",
v: 16,
lset: labels.FromStrings(
"__name__", "test_histogram2_bucket",
"le", "1.0",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false},
},
},
{ // 52
m: "test_histogram2_bucket\xffle\xff+Inf",
v: 175,
lset: labels.FromStrings(
"__name__", "test_histogram2_bucket",
"le", "+Inf",
),
},
{ // 53
m: "rpc_durations_seconds",
help: "RPC latency distributions.",
},
{ // 54
m: "rpc_durations_seconds",
typ: MetricTypeSummary,
},
{ // 55
m: "rpc_durations_seconds_count\xffservice\xffexponential",
v: 262,
lset: labels.FromStrings(
"__name__", "rpc_durations_seconds_count",
"service", "exponential",
),
},
{ // 56
m: "rpc_durations_seconds_sum\xffservice\xffexponential",
v: 0.00025551262820703587,
lset: labels.FromStrings(
"__name__", "rpc_durations_seconds_sum",
"service", "exponential",
),
},
{ // 57
m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.5",
v: 6.442786329648548e-07,
lset: labels.FromStrings(
"__name__", "rpc_durations_seconds",
"quantile", "0.5",
"service", "exponential",
),
},
{ // 58
m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.9",
v: 1.9435742936658396e-06,
lset: labels.FromStrings(
"__name__", "rpc_durations_seconds",
"quantile", "0.9",
"service", "exponential",
),
},
{ // 59
m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.99",
v: 4.0471608667037015e-06,
lset: labels.FromStrings(
"__name__", "rpc_durations_seconds",
"quantile", "0.99",
"service", "exponential",
),
},
{ // 60
m: "without_quantiles",
help: "A summary without quantiles.",
},
{ // 61
m: "without_quantiles",
typ: MetricTypeSummary,
},
{ // 62
m: "without_quantiles_count",
v: 42,
lset: labels.FromStrings(
"__name__", "without_quantiles_count",
),
},
{ // 63
m: "without_quantiles_sum",
v: 1.234,
lset: labels.FromStrings(
"__name__", "without_quantiles_sum",
),
},
},
},
} }
p := NewProtobufParser(inputBuf.Bytes()) for _, scenario := range scenarios {
i := 0 t.Run(scenario.name, func(t *testing.T) {
var (
var res labels.Labels i int
res labels.Labels
p = scenario.parser
exp = scenario.expected
)
for { for {
et, err := p.Next() et, err := p.Next()
@ -891,4 +1480,6 @@ metric: <
i++ i++
} }
require.Equal(t, len(exp), i) require.Equal(t, len(exp), i)
})
}
} }

View file

@ -58,7 +58,7 @@ const (
) )
func fuzzParseMetricWithContentType(in []byte, contentType string) int { func fuzzParseMetricWithContentType(in []byte, contentType string) int {
p, warning := textparse.New(in, contentType) p, warning := textparse.New(in, contentType, false)
if warning != nil { if warning != nil {
// An invalid content type is being passed, which should not happen // An invalid content type is being passed, which should not happen
// in this context. // in this context.

View file

@ -269,6 +269,7 @@ type scrapeLoopOptions struct {
honorTimestamps bool honorTimestamps bool
interval time.Duration interval time.Duration
timeout time.Duration timeout time.Duration
scrapeClassicHistograms bool
mrc []*relabel.Config mrc []*relabel.Config
cache *scrapeCache cache *scrapeCache
} }
@ -331,6 +332,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
opts.labelLimits, opts.labelLimits,
opts.interval, opts.interval,
opts.timeout, opts.timeout,
opts.scrapeClassicHistograms,
options.ExtraMetrics, options.ExtraMetrics,
options.EnableMetadataStorage, options.EnableMetadataStorage,
opts.target, opts.target,
@ -550,6 +552,7 @@ func (sp *scrapePool) sync(targets []*Target) {
honorLabels = sp.config.HonorLabels honorLabels = sp.config.HonorLabels
honorTimestamps = sp.config.HonorTimestamps honorTimestamps = sp.config.HonorTimestamps
mrc = sp.config.MetricRelabelConfigs mrc = sp.config.MetricRelabelConfigs
scrapeClassicHistograms = sp.config.ScrapeClassicHistograms
) )
sp.targetMtx.Lock() sp.targetMtx.Lock()
@ -578,6 +581,7 @@ func (sp *scrapePool) sync(targets []*Target) {
mrc: mrc, mrc: mrc,
interval: interval, interval: interval,
timeout: timeout, timeout: timeout,
scrapeClassicHistograms: scrapeClassicHistograms,
}) })
if err != nil { if err != nil {
l.setForcedError(err) l.setForcedError(err)
@ -896,6 +900,7 @@ type scrapeLoop struct {
labelLimits *labelLimits labelLimits *labelLimits
interval time.Duration interval time.Duration
timeout time.Duration timeout time.Duration
scrapeClassicHistograms bool
appender func(ctx context.Context) storage.Appender appender func(ctx context.Context) storage.Appender
sampleMutator labelsMutator sampleMutator labelsMutator
@ -1177,6 +1182,7 @@ func newScrapeLoop(ctx context.Context,
labelLimits *labelLimits, labelLimits *labelLimits,
interval time.Duration, interval time.Duration,
timeout time.Duration, timeout time.Duration,
scrapeClassicHistograms bool,
reportExtraMetrics bool, reportExtraMetrics bool,
appendMetadataToWAL bool, appendMetadataToWAL bool,
target *Target, target *Target,
@ -1221,6 +1227,7 @@ func newScrapeLoop(ctx context.Context,
labelLimits: labelLimits, labelLimits: labelLimits,
interval: interval, interval: interval,
timeout: timeout, timeout: timeout,
scrapeClassicHistograms: scrapeClassicHistograms,
reportExtraMetrics: reportExtraMetrics, reportExtraMetrics: reportExtraMetrics,
appendMetadataToWAL: appendMetadataToWAL, appendMetadataToWAL: appendMetadataToWAL,
} }
@ -1492,7 +1499,7 @@ type appendErrors struct {
} }
func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) { func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) {
p, err := textparse.New(b, contentType) p, err := textparse.New(b, contentType, sl.scrapeClassicHistograms)
if err != nil { if err != nil {
level.Debug(sl.l).Log( level.Debug(sl.l).Log(
"msg", "Invalid content type on scrape, using prometheus parser as fallback.", "msg", "Invalid content type on scrape, using prometheus parser as fallback.",

View file

@ -633,6 +633,7 @@ func TestScrapeLoopStopBeforeRun(t *testing.T) {
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -705,6 +706,7 @@ func TestScrapeLoopStop(t *testing.T) {
time.Hour, time.Hour,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -781,6 +783,7 @@ func TestScrapeLoopRun(t *testing.T) {
time.Hour, time.Hour,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -836,6 +839,7 @@ func TestScrapeLoopRun(t *testing.T) {
100*time.Millisecond, 100*time.Millisecond,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -895,6 +899,7 @@ func TestScrapeLoopForcedErr(t *testing.T) {
time.Hour, time.Hour,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -953,6 +958,7 @@ func TestScrapeLoopMetadata(t *testing.T) {
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1010,6 +1016,7 @@ func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) {
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1070,6 +1077,7 @@ func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) {
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1148,6 +1156,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
time.Hour, time.Hour,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1211,6 +1220,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
time.Hour, time.Hour,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1277,6 +1287,7 @@ func TestScrapeLoopCache(t *testing.T) {
time.Hour, time.Hour,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1360,6 +1371,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
time.Hour, time.Hour,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1474,6 +1486,7 @@ func TestScrapeLoopAppend(t *testing.T) {
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1563,7 +1576,7 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil) return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil)
}, },
nil, nil,
func(ctx context.Context) storage.Appender { return app }, nil, 0, true, 0, 0, nil, 0, 0, false, false, nil, false, func(ctx context.Context) storage.Appender { return app }, nil, 0, true, 0, 0, nil, 0, 0, false, false, false, nil, false,
) )
slApp := sl.appender(context.Background()) slApp := sl.appender(context.Background())
_, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC)) _, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC))
@ -1600,6 +1613,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1607,7 +1621,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
fakeRef := storage.SeriesRef(1) fakeRef := storage.SeriesRef(1)
expValue := float64(1) expValue := float64(1)
metric := []byte(`metric{n="1"} 1`) metric := []byte(`metric{n="1"} 1`)
p, warning := textparse.New(metric, "") p, warning := textparse.New(metric, "", false)
require.NoError(t, warning) require.NoError(t, warning)
var lset labels.Labels var lset labels.Labels
@ -1658,6 +1672,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1735,6 +1750,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1833,6 +1849,7 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) {
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1881,6 +1898,7 @@ func TestScrapeLoopAppendStaleness(t *testing.T) {
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1932,6 +1950,7 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -2043,6 +2062,7 @@ metric_total{n="2"} 2 # {t="2"} 2.0 20000
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -2108,6 +2128,7 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) {
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -2160,6 +2181,7 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) {
time.Hour, time.Hour,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -2196,6 +2218,7 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
time.Hour, time.Hour,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -2245,6 +2268,7 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -2290,6 +2314,7 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) {
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -2562,6 +2587,7 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) {
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -2603,6 +2629,7 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -2643,6 +2670,7 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -2701,6 +2729,7 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) {
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -2964,6 +2993,7 @@ func TestScrapeAddFast(t *testing.T) {
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -3050,6 +3080,7 @@ func TestScrapeReportSingleAppender(t *testing.T) {
time.Hour, time.Hour,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -3252,6 +3283,7 @@ func TestScrapeLoopLabelLimit(t *testing.T) {
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )

View file

@ -378,7 +378,7 @@ func TestFederationWithNativeHistograms(t *testing.T) {
body, err := io.ReadAll(res.Body) body, err := io.ReadAll(res.Body)
require.NoError(t, err) require.NoError(t, err)
p := textparse.NewProtobufParser(body) p := textparse.NewProtobufParser(body, false)
var actVec promql.Vector var actVec promql.Vector
metricFamilies := 0 metricFamilies := 0
l := labels.Labels{} l := labels.Labels{}