Merge pull request #395 from grafana/merge-jan-16-upstream

Merge Jan 16 from Upstream
FG 2023-01-17 09:52:02 +00:00 committed by GitHub
commit d44f63ec48
61 changed files with 2903 additions and 517 deletions


@@ -209,7 +209,7 @@ tls_config:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
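The change above is a one-word fix: the documented key becomes the singular `proxy_connect_header`, which is the field name the HTTP client options actually accept; the same correction repeats in every HTTP-client section of this file below. As a quick sanity check, here is a minimal, self-contained Go sketch (the proxy URL and header value are invented for illustration, and the struct mirrors only the two documented keys):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// proxySettings mirrors just the two documented keys, for illustration only.
type proxySettings struct {
	ProxyURL           string              `yaml:"proxy_url"`
	ProxyConnectHeader map[string][]string `yaml:"proxy_connect_header"`
}

func main() {
	raw := `
proxy_url: http://proxy.example.internal:3128
proxy_connect_header:
  Proxy-Authorization:
    - Basic c2NyYXBlcjpodW50ZXIy
`
	var s proxySettings
	if err := yaml.Unmarshal([]byte(raw), &s); err != nil {
		panic(err)
	}
	// Each header name maps to a list of values, matching the
	// [ <string>: [<secret>, ...] ] syntax shown above.
	fmt.Println(s.ProxyURL, s.ProxyConnectHeader["Proxy-Authorization"])
}
```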
@@ -432,7 +432,7 @@ tls_config:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
```
@@ -514,7 +514,7 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
@@ -619,7 +619,7 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
@@ -700,7 +700,7 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
@@ -752,7 +752,7 @@ host: <string>
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# TLS configuration.
@@ -921,7 +921,7 @@ host: <string>
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# TLS configuration.
@@ -1122,7 +1122,7 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
@@ -1391,7 +1391,7 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
@@ -1602,7 +1602,7 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
@@ -1690,7 +1690,7 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
@@ -1767,7 +1767,7 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
@@ -1983,7 +1983,7 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
@@ -2063,7 +2063,7 @@ server: <string>
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# TLS configuration.
@@ -2184,7 +2184,7 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
@@ -2259,7 +2259,7 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
@@ -2361,7 +2361,7 @@ tls_config:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
```
@@ -2454,7 +2454,7 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
@@ -2634,7 +2634,7 @@ tls_config:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
@@ -2753,7 +2753,7 @@ tags_filter:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# TLS configuration.
@@ -2823,7 +2823,7 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
@@ -2899,7 +2899,7 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
@@ -3095,7 +3095,7 @@ tls_config:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
@@ -3307,7 +3307,7 @@ tls_config:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
@@ -3414,7 +3414,7 @@ tls_config:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.


@@ -17,9 +17,9 @@ Rule files use YAML.
The rule files can be reloaded at runtime by sending `SIGHUP` to the Prometheus
process. The changes are only applied if all rule files are well-formatted.
_Note about native histograms (experimental feature): Rules evaluating to
native histograms do not yet work as expected. Instead of a native histogram,
the sample stored is just a floating point value of zero._
_Note about native histograms (experimental feature): Native histograms are always
recorded as gauge histograms (for now). Most cases will create gauge histograms
naturally, e.g. after `rate()`._
## Syntax-checking rules
@@ -70,8 +70,8 @@ A simple example rules file would be:
groups:
- name: example
rules:
- record: job:http_inprogress_requests:sum
expr: sum by (job) (http_inprogress_requests)
- record: code:prometheus_http_requests_total:sum
expr: sum by (code) (prometheus_http_requests_total)
```
### `<rule_group>`


@@ -8,8 +8,15 @@ sort_rank: 6
Federation allows a Prometheus server to scrape selected time series from
another Prometheus server.
_Note about native histograms (experimental feature): Federation does not
support native histograms yet._
_Note about native histograms (experimental feature): To scrape native histograms
via federation, the scraping Prometheus server needs to run with native histograms
enabled (via the command line flag `--enable-feature=native-histograms`), implying
that the protobuf format is used for scraping. Should the federated metrics contain
a mix of different sample types (float64, counter histogram, gauge histogram) for
the same metric name, the federation payload will contain multiple metric families
with the same name (but different types). Technically, this violates the rules of
the protobuf exposition format, but Prometheus is nevertheless able to ingest all
metrics correctly._
## Use cases


@@ -33,6 +33,15 @@ metadata:
name: prometheus
namespace: default
---
apiVersion: v1
kind: Secret
metadata:
name: prometheus-sa-token
namespace: default
annotations:
kubernetes.io/service-account.name: prometheus
type: kubernetes.io/service-account-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:


@@ -27,6 +27,8 @@ import (
// used to represent a histogram with integer counts and thus serves as a more
// generalized representation.
type FloatHistogram struct {
// Counter reset information.
CounterResetHint CounterResetHint
// Currently valid schema numbers are -4 <= n <= 8. They are all for
// base-2 bucket schemas, where 1 is a bucket boundary in each case, and
// then each power of two is divided into 2^n logarithmic buckets. Or


@@ -19,6 +19,17 @@ import (
"strings"
)
// CounterResetHint contains the known information about a counter reset,
// or alternatively that we are dealing with a gauge histogram, where counter resets do not apply.
type CounterResetHint byte
const (
UnknownCounterReset CounterResetHint = iota // UnknownCounterReset means we cannot say if this histogram signals a counter reset or not.
CounterReset // CounterReset means there was definitely a counter reset starting from this histogram.
NotCounterReset // NotCounterReset means there was definitely no counter reset with this histogram.
GaugeType // GaugeType means this is a gauge histogram, where counter resets do not happen.
)
// Histogram encodes a sparse, high-resolution histogram. See the design
// document for full details:
// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit#
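The `CounterResetHint` introduced above travels with each histogram sample as advisory metadata rather than as part of the bucket data. A hedged sketch of how downstream code might branch on the four values (`describeHint` is a hypothetical helper, not part of this change):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

// describeHint is a hypothetical helper spelling out how the four hint
// values defined above are meant to be read.
func describeHint(h histogram.CounterResetHint) string {
	switch h {
	case histogram.CounterReset:
		return "a counter reset definitely happened at this sample"
	case histogram.NotCounterReset:
		return "definitely no counter reset at this sample"
	case histogram.GaugeType:
		return "gauge histogram: counter resets do not apply"
	default: // histogram.UnknownCounterReset
		return "unknown: detect resets by comparing with the previous sample"
	}
}

func main() {
	fmt.Println(describeHint(histogram.GaugeType))
}
```

The `ToFloat()` hunk below shows the hint being carried across the integer-to-float conversion as well.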
@@ -35,6 +46,8 @@ import (
//
// Which bucket indices are actually used is determined by the spans.
type Histogram struct {
// Counter reset information.
CounterResetHint CounterResetHint
// Currently valid schema numbers are -4 <= n <= 8. They are all for
// base-2 bucket schemas, where 1 is a bucket boundary in each case, and
// then each power of two is divided into 2^n logarithmic buckets. Or
@@ -295,15 +308,16 @@ func (h *Histogram) ToFloat() *FloatHistogram {
}
return &FloatHistogram{
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: float64(h.ZeroCount),
Count: float64(h.Count),
Sum: h.Sum,
PositiveSpans: positiveSpans,
NegativeSpans: negativeSpans,
PositiveBuckets: positiveBuckets,
NegativeBuckets: negativeBuckets,
CounterResetHint: h.CounterResetHint,
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: float64(h.ZeroCount),
Count: float64(h.Count),
Sum: h.Sum,
PositiveSpans: positiveSpans,
NegativeSpans: negativeSpans,
PositiveBuckets: positiveBuckets,
NegativeBuckets: negativeBuckets,
}
}


@@ -113,8 +113,8 @@ func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) {
return p.series, nil, p.val
}
// Histogram always returns (nil, nil, nil, nil) because OpenMetrics does not support
// sparse histograms.
// Histogram returns (nil, nil, nil, nil) for now because OpenMetrics does not
// support sparse histograms yet.
func (p *OpenMetricsParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) {
return nil, nil, nil, nil
}


@@ -168,8 +168,8 @@ func (p *PromParser) Series() ([]byte, *int64, float64) {
return p.series, nil, p.val
}
// Histogram always returns (nil, nil, nil, nil) because the Prometheus text format
// does not support sparse histograms.
// Histogram returns (nil, nil, nil, nil) for now because the Prometheus text
// format does not support sparse histograms yet.
func (p *PromParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) {
return nil, nil, nil, nil
}


@@ -105,7 +105,7 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) {
default:
v = s.GetQuantile()[p.fieldPos].GetValue()
}
case dto.MetricType_HISTOGRAM:
case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
// This should only happen for a legacy histogram.
h := m.GetHistogram()
switch p.fieldPos {
@@ -170,6 +170,9 @@ func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *his
fh.NegativeSpans[i].Offset = span.GetOffset()
fh.NegativeSpans[i].Length = span.GetLength()
}
if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM {
fh.CounterResetHint = histogram.GaugeType
}
fh.Compact(0)
if ts != 0 {
return p.metricBytes.Bytes(), &ts, nil, &fh
@@ -199,6 +202,9 @@ func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *his
sh.NegativeSpans[i].Offset = span.GetOffset()
sh.NegativeSpans[i].Length = span.GetLength()
}
if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM {
sh.CounterResetHint = histogram.GaugeType
}
sh.Compact(0)
if ts != 0 {
return p.metricBytes.Bytes(), &ts, &sh, nil
@@ -225,6 +231,8 @@ func (p *ProtobufParser) Type() ([]byte, MetricType) {
return n, MetricTypeGauge
case dto.MetricType_HISTOGRAM:
return n, MetricTypeHistogram
case dto.MetricType_GAUGE_HISTOGRAM:
return n, MetricTypeGaugeHistogram
case dto.MetricType_SUMMARY:
return n, MetricTypeSummary
}
@@ -273,7 +281,7 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool {
switch p.mf.GetType() {
case dto.MetricType_COUNTER:
exProto = m.GetCounter().GetExemplar()
case dto.MetricType_HISTOGRAM:
case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
bb := m.GetHistogram().GetBucket()
if p.fieldPos < 0 {
if p.state == EntrySeries {
@@ -331,7 +339,7 @@ func (p *ProtobufParser) Next() (Entry, error) {
}
// We are at the beginning of a metric family. Put only the name
// into metricBytes and validate only name and help for now.
// into metricBytes and validate only name, help, and type for now.
name := p.mf.GetName()
if !model.IsValidMetricName(model.LabelValue(name)) {
return EntryInvalid, errors.Errorf("invalid metric name: %s", name)
@@ -339,6 +347,17 @@ func (p *ProtobufParser) Next() (Entry, error) {
if help := p.mf.GetHelp(); !utf8.ValidString(help) {
return EntryInvalid, errors.Errorf("invalid help for metric %q: %s", name, help)
}
switch p.mf.GetType() {
case dto.MetricType_COUNTER,
dto.MetricType_GAUGE,
dto.MetricType_HISTOGRAM,
dto.MetricType_GAUGE_HISTOGRAM,
dto.MetricType_SUMMARY,
dto.MetricType_UNTYPED:
// All good.
default:
return EntryInvalid, errors.Errorf("unknown metric type for metric %q: %s", name, p.mf.GetType())
}
p.metricBytes.Reset()
p.metricBytes.WriteString(name)
@@ -346,7 +365,8 @@ func (p *ProtobufParser) Next() (Entry, error) {
case EntryHelp:
p.state = EntryType
case EntryType:
if p.mf.GetType() == dto.MetricType_HISTOGRAM &&
t := p.mf.GetType()
if (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) &&
isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) {
p.state = EntryHistogram
} else {
@@ -356,8 +376,11 @@ func (p *ProtobufParser) Next() (Entry, error) {
return EntryInvalid, err
}
case EntryHistogram, EntrySeries:
t := p.mf.GetType()
if p.state == EntrySeries && !p.fieldsDone &&
(p.mf.GetType() == dto.MetricType_SUMMARY || p.mf.GetType() == dto.MetricType_HISTOGRAM) {
(t == dto.MetricType_SUMMARY ||
t == dto.MetricType_HISTOGRAM ||
t == dto.MetricType_GAUGE_HISTOGRAM) {
p.fieldPos++
} else {
p.metricPos++
@@ -418,7 +441,7 @@ func (p *ProtobufParser) getMagicName() string {
if p.fieldPos == -1 {
return p.mf.GetName() + "_sum"
}
if t == dto.MetricType_HISTOGRAM {
if t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM {
return p.mf.GetName() + "_bucket"
}
return p.mf.GetName()
@@ -436,7 +459,7 @@ func (p *ProtobufParser) getMagicLabel() (bool, string, string) {
q := qq[p.fieldPos]
p.fieldsDone = p.fieldPos == len(qq)-1
return true, model.QuantileLabel, formatOpenMetricsFloat(q.GetQuantile())
case dto.MetricType_HISTOGRAM:
case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
bb := p.mf.GetMetric()[p.metricPos].GetHistogram().GetBucket()
if p.fieldPos >= len(bb) {
p.fieldsDone = true


@@ -16,6 +16,7 @@ package textparse
import (
"bytes"
"encoding/binary"
"errors"
"io"
"testing"
@@ -155,7 +156,76 @@ metric: <
>
`,
`name: "test_gauge_histogram"
help: "Like test_histogram but as gauge histogram."
type: GAUGE_HISTOGRAM
metric: <
histogram: <
sample_count: 175
sample_sum: 0.0008280461746287094
bucket: <
cumulative_count: 2
upper_bound: -0.0004899999999999998
>
bucket: <
cumulative_count: 4
upper_bound: -0.0003899999999999998
exemplar: <
label: <
name: "dummyID"
value: "59727"
>
value: -0.00039
timestamp: <
seconds: 1625851155
nanos: 146848499
>
>
>
bucket: <
cumulative_count: 16
upper_bound: -0.0002899999999999998
exemplar: <
label: <
name: "dummyID"
value: "5617"
>
value: -0.00029
>
>
schema: 3
zero_threshold: 2.938735877055719e-39
zero_count: 2
negative_span: <
offset: -162
length: 1
>
negative_span: <
offset: 23
length: 4
>
negative_delta: 1
negative_delta: 3
negative_delta: -2
negative_delta: -1
negative_delta: 1
positive_span: <
offset: -161
length: 1
>
positive_span: <
offset: 8
length: 3
>
positive_delta: 1
positive_delta: 2
positive_delta: -1
positive_delta: -1
>
timestamp_ms: 1234568
>
`,
`name: "test_float_histogram"
help: "Test float histogram with many buckets removed to keep it manageable in size."
type: HISTOGRAM
@@ -226,6 +296,77 @@ metric: <
timestamp_ms: 1234568
>
`,
`name: "test_gauge_float_histogram"
help: "Like test_float_histogram but as gauge histogram."
type: GAUGE_HISTOGRAM
metric: <
histogram: <
sample_count: 175
sample_count_float: 175.0
sample_sum: 0.0008280461746287094
bucket: <
cumulative_count_float: 2.0
upper_bound: -0.0004899999999999998
>
bucket: <
cumulative_count_float: 4.0
upper_bound: -0.0003899999999999998
exemplar: <
label: <
name: "dummyID"
value: "59727"
>
value: -0.00039
timestamp: <
seconds: 1625851155
nanos: 146848499
>
>
>
bucket: <
cumulative_count_float: 16
upper_bound: -0.0002899999999999998
exemplar: <
label: <
name: "dummyID"
value: "5617"
>
value: -0.00029
>
>
schema: 3
zero_threshold: 2.938735877055719e-39
zero_count_float: 2.0
negative_span: <
offset: -162
length: 1
>
negative_span: <
offset: 23
length: 4
>
negative_count: 1.0
negative_count: 3.0
negative_count: -2.0
negative_count: -1.0
negative_count: 1.0
positive_span: <
offset: -161
length: 1
>
positive_span: <
offset: 8
length: 3
>
positive_count: 1.0
positive_count: 2.0
positive_count: -1.0
positive_count: -1.0
>
timestamp_ms: 1234568
>
`,
`name: "test_histogram2"
help: "Similar histogram as before but now without sparse buckets."
@@ -426,6 +567,43 @@ metric: <
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
},
},
{
m: "test_gauge_histogram",
help: "Like test_histogram but as gauge histogram.",
},
{
m: "test_gauge_histogram",
typ: MetricTypeGaugeHistogram,
},
{
m: "test_gauge_histogram",
t: 1234568,
shs: &histogram.Histogram{
CounterResetHint: histogram.GaugeType,
Count: 175,
ZeroCount: 2,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []int64{1, 2, -1, -1},
NegativeBuckets: []int64{1, 3, -2, -1, 1},
},
lset: labels.FromStrings(
"__name__", "test_gauge_histogram",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
},
},
{
m: "test_float_histogram",
help: "Test float histogram with many buckets removed to keep it manageable in size.",
@@ -462,6 +640,43 @@ metric: <
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
},
},
{
m: "test_gauge_float_histogram",
help: "Like test_float_histogram but as gauge histogram.",
},
{
m: "test_gauge_float_histogram",
typ: MetricTypeGaugeHistogram,
},
{
m: "test_gauge_float_histogram",
t: 1234568,
fhs: &histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Count: 175.0,
ZeroCount: 2.0,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0},
NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0},
},
lset: labels.FromStrings(
"__name__", "test_gauge_float_histogram",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
},
},
{
m: "test_histogram2",
help: "Similar histogram as before but now without sparse buckets.",
@@ -604,7 +819,7 @@ metric: <
for {
et, err := p.Next()
if err == io.EOF {
if errors.Is(err, io.EOF) {
break
}
require.NoError(t, err)


@@ -174,6 +174,15 @@ func rangeQueryCases() []benchCase {
{
expr: "a_X + on(l) group_right a_one",
},
// Label compared to blank string.
{
expr: "count({__name__!=\"\"})",
steps: 1,
},
{
expr: "count({__name__!=\"\",l=\"\"})",
steps: 1,
},
}
// X in an expr will be replaced by different metric sizes.


@@ -3184,10 +3184,12 @@ func TestSparseHistogramRate(t *testing.T) {
Schema: 1,
ZeroThreshold: 0.001,
ZeroCount: 1. / 15.,
Count: 4. / 15.,
Count: 8. / 15.,
Sum: 1.226666666666667,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.},
NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.},
}
require.Equal(t, expectedHistogram, actualHistogram)
}
@@ -3225,10 +3227,12 @@ func TestSparseFloatHistogramRate(t *testing.T) {
Schema: 1,
ZeroThreshold: 0.001,
ZeroCount: 1. / 15.,
Count: 4. / 15.,
Count: 8. / 15.,
Sum: 1.226666666666667,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.},
NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.},
}
require.Equal(t, expectedHistogram, actualHistogram)
}


@@ -31,6 +31,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/prometheus/prometheus/model/timestamp"
@@ -681,7 +682,16 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
}()
for _, s := range vector {
if _, err := app.Append(0, s.Metric, s.T, s.V); err != nil {
if s.H != nil {
// We assume that all native histogram results are gauge histograms.
// TODO(codesome): once PromQL can give the counter reset info, remove this assumption.
s.H.CounterResetHint = histogram.GaugeType
_, err = app.AppendHistogram(0, s.Metric, s.T, nil, s.H)
} else {
_, err = app.Append(0, s.Metric, s.T, s.V)
}
if err != nil {
rule.SetHealth(HealthBad)
rule.SetLastError(err)
sp.SetStatus(codes.Error, err.Error())


@@ -32,6 +32,7 @@ import (
"go.uber.org/goleak"
"gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/prometheus/prometheus/model/timestamp"
@@ -39,6 +40,7 @@ import (
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/util/teststorage"
)
@@ -1731,3 +1733,68 @@ func TestUpdateMissedEvalMetrics(t *testing.T) {
testFunc(tst)
}
}
func TestNativeHistogramsInRecordingRules(t *testing.T) {
suite, err := promql.NewTest(t, "")
require.NoError(t, err)
t.Cleanup(suite.Close)
err = suite.Run()
require.NoError(t, err)
// Add some histograms.
db := suite.TSDB()
hists := tsdb.GenerateTestHistograms(5)
ts := time.Now()
app := db.Appender(context.Background())
for i, h := range hists {
l := labels.FromStrings("__name__", "histogram_metric", "idx", fmt.Sprintf("%d", i))
_, err := app.AppendHistogram(0, l, ts.UnixMilli(), h.Copy(), nil)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
opts := &ManagerOptions{
QueryFunc: EngineQueryFunc(suite.QueryEngine(), suite.Storage()),
Appendable: suite.Storage(),
Queryable: suite.Storage(),
Context: context.Background(),
Logger: log.NewNopLogger(),
}
expr, err := parser.ParseExpr("sum(histogram_metric)")
require.NoError(t, err)
rule := NewRecordingRule("sum:histogram_metric", expr, labels.Labels{})
group := NewGroup(GroupOptions{
Name: "default",
Interval: time.Hour,
Rules: []Rule{rule},
ShouldRestore: true,
Opts: opts,
})
group.Eval(context.Background(), ts.Add(10*time.Second))
q, err := db.Querier(context.Background(), ts.UnixMilli(), ts.Add(20*time.Second).UnixMilli())
require.NoError(t, err)
ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "__name__", "sum:histogram_metric"))
require.True(t, ss.Next())
s := ss.At()
require.False(t, ss.Next())
require.Equal(t, labels.FromStrings("__name__", "sum:histogram_metric"), s.Labels())
expHist := hists[0].ToFloat()
for _, h := range hists[1:] {
expHist = expHist.Add(h.ToFloat())
}
expHist.CounterResetHint = histogram.GaugeType
it := s.Iterator(nil)
require.Equal(t, chunkenc.ValFloatHistogram, it.Next())
tsp, fh := it.AtFloatHistogram()
require.Equal(t, ts.Add(10*time.Second).UnixMilli(), tsp)
require.Equal(t, expHist, fh)
require.Equal(t, chunkenc.ValNone, it.Next())
}


@@ -1544,7 +1544,7 @@ loop:
fh *histogram.FloatHistogram
)
if et, err = p.Next(); err != nil {
if err == io.EOF {
if errors.Is(err, io.EOF) {
err = nil
}
break


@@ -68,9 +68,11 @@ func (b *BufferedSeriesIterator) ReduceDelta(delta int64) bool {
// PeekBack returns the nth previous element of the iterator. If there is none buffered,
// ok is false.
func (b *BufferedSeriesIterator) PeekBack(n int) (t int64, v float64, h *histogram.Histogram, ok bool) {
func (b *BufferedSeriesIterator) PeekBack(n int) (
t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, ok bool,
) {
s, ok := b.buf.nthLast(n)
return s.t, s.v, s.h, ok
return s.t, s.v, s.h, s.fh, ok
}
// Buffer returns an iterator over the buffered data. Invalidates previously


@@ -107,7 +107,7 @@ func TestBufferedSeriesIterator(t *testing.T) {
require.Equal(t, ev, v, "value mismatch")
}
prevSampleEq := func(ets int64, ev float64, eok bool) {
ts, v, _, ok := it.PeekBack(1)
ts, v, _, _, ok := it.PeekBack(1)
require.Equal(t, eok, ok, "exist mismatch")
require.Equal(t, ets, ts, "timestamp mismatch")
require.Equal(t, ev, v, "value mismatch")


@@ -525,7 +525,7 @@ func exemplarProtoToExemplar(ep prompb.Exemplar) exemplar.Exemplar {
// HistogramProtoToHistogram extracts a (normal integer) Histogram from the
// provided proto message. The caller has to make sure that the proto message
// represents an interger histogram and not a float histogram.
// represents an integer histogram and not a float histogram.
func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram {
return &histogram.Histogram{
Schema: hp.Schema,
@@ -540,6 +540,23 @@ func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram {
}
}
// HistogramProtoToFloatHistogram extracts a float Histogram from the provided
// proto message. The caller has to make sure that the proto message represents
// a float histogram and not an integer histogram.
func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram {
return &histogram.FloatHistogram{
Schema: hp.Schema,
ZeroThreshold: hp.ZeroThreshold,
ZeroCount: hp.GetZeroCountFloat(),
Count: hp.GetCountFloat(),
Sum: hp.Sum,
PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()),
PositiveBuckets: hp.GetPositiveCounts(),
NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()),
NegativeBuckets: hp.GetNegativeCounts(),
}
}
func spansProtoToSpans(s []*prompb.BucketSpan) []histogram.Span {
spans := make([]histogram.Span, len(s))
for i := 0; i < len(s); i++ {
@@ -564,6 +581,21 @@ func HistogramToHistogramProto(timestamp int64, h *histogram.Histogram) prompb.H
}
}
func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogram) prompb.Histogram {
return prompb.Histogram{
Count: &prompb.Histogram_CountFloat{CountFloat: fh.Count},
Sum: fh.Sum,
Schema: fh.Schema,
ZeroThreshold: fh.ZeroThreshold,
ZeroCount: &prompb.Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
NegativeSpans: spansToSpansProto(fh.NegativeSpans),
NegativeCounts: fh.NegativeBuckets,
PositiveSpans: spansToSpansProto(fh.PositiveSpans),
PositiveCounts: fh.PositiveBuckets,
Timestamp: timestamp,
}
}
func spansToSpansProto(s []histogram.Span) []*prompb.BucketSpan {
spans := make([]*prompb.BucketSpan, len(s))
for i := 0; i < len(s); i++ {
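Together, the new `HistogramProtoToFloatHistogram` and `FloatHistogramToHistogramProto` form a round trip for float histograms. A minimal sketch under the assumption that both remain exported from `storage/remote` (all sample values invented):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/storage/remote"
)

func main() {
	// A tiny float histogram; ZeroCount 2 plus buckets 4 and 6 gives Count 12.
	fh := &histogram.FloatHistogram{
		Schema:          0,
		ZeroThreshold:   0.001,
		ZeroCount:       2,
		Count:           12,
		Sum:             18.4,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{4, 6},
	}

	hp := remote.FloatHistogramToHistogramProto(1234, fh)
	// On the wire a float histogram is recognizable by the *_float
	// oneof variants being populated.
	fmt.Println(hp.GetCountFloat()) // 12

	back := remote.HistogramProtoToFloatHistogram(hp)
	fmt.Println(back.Count == fh.Count, back.Sum == fh.Sum) // true true
}
```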


@@ -55,7 +55,7 @@ var writeRequestFixture = &prompb.WriteRequest{
},
Samples: []prompb.Sample{{Value: 1, Timestamp: 0}},
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 0}},
Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram)},
Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat())},
},
{
Labels: []prompb.Label{
@@ -67,7 +67,7 @@ var writeRequestFixture = &prompb.WriteRequest{
},
Samples: []prompb.Sample{{Value: 2, Timestamp: 1}},
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 1}},
Histograms: []prompb.Histogram{HistogramToHistogramProto(1, &testHistogram)},
Histograms: []prompb.Histogram{HistogramToHistogramProto(2, &testHistogram), FloatHistogramToHistogramProto(3, testHistogram.ToFloat())},
},
},
}
@@ -368,6 +368,7 @@ func TestNilHistogramProto(t *testing.T) {
// This function will panic if it improperly handles nil
// values, causing the test to fail.
HistogramProtoToHistogram(prompb.Histogram{})
HistogramProtoToFloatHistogram(prompb.Histogram{})
}
func TestStreamResponse(t *testing.T) {


@@ -716,6 +716,53 @@ outer:
return true
}
func (t *QueueManager) AppendFloatHistograms(floatHistograms []record.RefFloatHistogramSample) bool {
if !t.sendNativeHistograms {
return true
}
outer:
for _, h := range floatHistograms {
t.seriesMtx.Lock()
lbls, ok := t.seriesLabels[h.Ref]
if !ok {
t.metrics.droppedHistogramsTotal.Inc()
t.dataDropped.incr(1)
if _, ok := t.droppedSeries[h.Ref]; !ok {
level.Info(t.logger).Log("msg", "Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref)
}
t.seriesMtx.Unlock()
continue
}
t.seriesMtx.Unlock()
backoff := model.Duration(5 * time.Millisecond)
for {
select {
case <-t.quit:
return false
default:
}
if t.shards.enqueue(h.Ref, timeSeries{
seriesLabels: lbls,
timestamp: h.T,
floatHistogram: h.FH,
sType: tFloatHistogram,
}) {
continue outer
}
t.metrics.enqueueRetriesTotal.Inc()
time.Sleep(time.Duration(backoff))
backoff = backoff * 2
if backoff > t.cfg.MaxBackoff {
backoff = t.cfg.MaxBackoff
}
}
}
return true
}
// Start the queue manager sending samples to the remote storage.
// Does not block.
func (t *QueueManager) Start() {
@@ -1129,7 +1176,7 @@ func (s *shards) enqueue(ref chunks.HeadSeriesRef, data timeSeries) bool {
case tExemplar:
s.qm.metrics.pendingExemplars.Inc()
s.enqueuedExemplars.Inc()
case tHistogram:
case tHistogram, tFloatHistogram:
s.qm.metrics.pendingHistograms.Inc()
s.enqueuedHistograms.Inc()
}
@@ -1154,6 +1201,7 @@ type timeSeries struct {
seriesLabels labels.Labels
value float64
histogram *histogram.Histogram
floatHistogram *histogram.FloatHistogram
timestamp int64
exemplarLabels labels.Labels
// The type of series: sample, exemplar, histogram, or float histogram.
@@ -1166,6 +1214,7 @@ const (
tSample seriesType = iota
tExemplar
tHistogram
tFloatHistogram
)
func newQueue(batchSize, capacity int) *queue {
@@ -1353,7 +1402,8 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
if len(batch) > 0 {
nPendingSamples, nPendingExemplars, nPendingHistograms := s.populateTimeSeries(batch, pendingData)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples, "exemplars", nPendingExemplars, "shard", shardNum)
level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
"exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
}
queue.ReturnForReuse(batch)
@@ -1394,6 +1444,9 @@ func (s *shards) populateTimeSeries(batch []timeSeries, pendingData []prompb.Tim
case tHistogram:
pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, HistogramToHistogramProto(d.timestamp, d.histogram))
nPendingHistograms++
case tFloatHistogram:
pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, FloatHistogramToHistogramProto(d.timestamp, d.floatHistogram))
nPendingHistograms++
}
}
return nPendingSamples, nPendingExemplars, nPendingHistograms


@@ -61,15 +61,17 @@ func newHighestTimestampMetric() *maxTimestamp {
func TestSampleDelivery(t *testing.T) {
testcases := []struct {
name string
samples bool
exemplars bool
histograms bool
name string
samples bool
exemplars bool
histograms bool
floatHistograms bool
}{
{samples: true, exemplars: false, histograms: false, name: "samples only"},
{samples: true, exemplars: true, histograms: true, name: "samples, exemplars, and histograms"},
{samples: false, exemplars: true, histograms: false, name: "exemplars only"},
{samples: false, exemplars: false, histograms: true, name: "histograms only"},
{samples: true, exemplars: false, histograms: false, floatHistograms: false, name: "samples only"},
{samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "samples, exemplars, and histograms"},
{samples: false, exemplars: true, histograms: false, floatHistograms: false, name: "exemplars only"},
{samples: false, exemplars: false, histograms: true, floatHistograms: false, name: "histograms only"},
{samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "float histograms only"},
}
// Let's create an even number of send batches so we don't run into the
@@ -101,10 +103,11 @@ func TestSampleDelivery(t *testing.T) {
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
var (
series []record.RefSeries
samples []record.RefSample
exemplars []record.RefExemplar
histograms []record.RefHistogramSample
series []record.RefSeries
samples []record.RefSample
exemplars []record.RefExemplar
histograms []record.RefHistogramSample
floatHistograms []record.RefFloatHistogramSample
)
// Generates same series in both cases.
@@ -115,7 +118,10 @@ func TestSampleDelivery(t *testing.T) {
exemplars, series = createExemplars(n, n)
}
if tc.histograms {
histograms, series = createHistograms(n, n)
histograms, _, series = createHistograms(n, n, false)
}
if tc.floatHistograms {
_, floatHistograms, series = createHistograms(n, n, true)
}
// Apply new config.
@@ -135,18 +141,22 @@ func TestSampleDelivery(t *testing.T) {
c.expectSamples(samples[:len(samples)/2], series)
c.expectExemplars(exemplars[:len(exemplars)/2], series)
c.expectHistograms(histograms[:len(histograms)/2], series)
c.expectFloatHistograms(floatHistograms[:len(floatHistograms)/2], series)
qm.Append(samples[:len(samples)/2])
qm.AppendExemplars(exemplars[:len(exemplars)/2])
qm.AppendHistograms(histograms[:len(histograms)/2])
qm.AppendFloatHistograms(floatHistograms[:len(floatHistograms)/2])
c.waitForExpectedData(t)
// Send second half of data.
c.expectSamples(samples[len(samples)/2:], series)
c.expectExemplars(exemplars[len(exemplars)/2:], series)
c.expectHistograms(histograms[len(histograms)/2:], series)
c.expectFloatHistograms(floatHistograms[len(floatHistograms)/2:], series)
qm.Append(samples[len(samples)/2:])
qm.AppendExemplars(exemplars[len(exemplars)/2:])
qm.AppendHistograms(histograms[len(histograms)/2:])
qm.AppendFloatHistograms(floatHistograms[len(floatHistograms)/2:])
c.waitForExpectedData(t)
})
}
@@ -586,35 +596,50 @@ func createExemplars(numExemplars, numSeries int) ([]record.RefExemplar, []recor
return exemplars, series
}
func createHistograms(numSamples, numSeries int) ([]record.RefHistogramSample, []record.RefSeries) {
func createHistograms(numSamples, numSeries int, floatHistogram bool) ([]record.RefHistogramSample, []record.RefFloatHistogramSample, []record.RefSeries) {
histograms := make([]record.RefHistogramSample, 0, numSamples)
floatHistograms := make([]record.RefFloatHistogramSample, 0, numSamples)
series := make([]record.RefSeries, 0, numSeries)
for i := 0; i < numSeries; i++ {
name := fmt.Sprintf("test_metric_%d", i)
for j := 0; j < numSamples; j++ {
h := record.RefHistogramSample{
Ref: chunks.HeadSeriesRef(i),
T: int64(j),
H: &histogram.Histogram{
Schema: 2,
ZeroThreshold: 1e-128,
ZeroCount: 0,
Count: 2,
Sum: 0,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []int64{int64(i) + 1},
NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}},
NegativeBuckets: []int64{int64(-i) - 1},
},
hist := &histogram.Histogram{
Schema: 2,
ZeroThreshold: 1e-128,
ZeroCount: 0,
Count: 2,
Sum: 0,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []int64{int64(i) + 1},
NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}},
NegativeBuckets: []int64{int64(-i) - 1},
}
if floatHistogram {
fh := record.RefFloatHistogramSample{
Ref: chunks.HeadSeriesRef(i),
T: int64(j),
FH: hist.ToFloat(),
}
floatHistograms = append(floatHistograms, fh)
} else {
h := record.RefHistogramSample{
Ref: chunks.HeadSeriesRef(i),
T: int64(j),
H: hist,
}
histograms = append(histograms, h)
}
histograms = append(histograms, h)
}
series = append(series, record.RefSeries{
Ref: chunks.HeadSeriesRef(i),
Labels: labels.FromStrings("__name__", name),
})
}
return histograms, series
if floatHistogram {
return nil, floatHistograms, series
}
return histograms, nil, series
}
func getSeriesNameFromRef(r record.RefSeries) string {
@@ -622,18 +647,20 @@ func getSeriesNameFromRef(r record.RefSeries) string {
}
type TestWriteClient struct {
receivedSamples map[string][]prompb.Sample
expectedSamples map[string][]prompb.Sample
receivedExemplars map[string][]prompb.Exemplar
expectedExemplars map[string][]prompb.Exemplar
receivedHistograms map[string][]prompb.Histogram
expectedHistograms map[string][]prompb.Histogram
receivedMetadata map[string][]prompb.MetricMetadata
writesReceived int
withWaitGroup bool
wg sync.WaitGroup
mtx sync.Mutex
buf []byte
receivedSamples map[string][]prompb.Sample
expectedSamples map[string][]prompb.Sample
receivedExemplars map[string][]prompb.Exemplar
expectedExemplars map[string][]prompb.Exemplar
receivedHistograms map[string][]prompb.Histogram
receivedFloatHistograms map[string][]prompb.Histogram
expectedHistograms map[string][]prompb.Histogram
expectedFloatHistograms map[string][]prompb.Histogram
receivedMetadata map[string][]prompb.MetricMetadata
writesReceived int
withWaitGroup bool
wg sync.WaitGroup
mtx sync.Mutex
buf []byte
}
func NewTestWriteClient() *TestWriteClient {
@@ -704,6 +731,23 @@ func (c *TestWriteClient) expectHistograms(hh []record.RefHistogramSample, serie
c.wg.Add(len(hh))
}
func (c *TestWriteClient) expectFloatHistograms(fhs []record.RefFloatHistogramSample, series []record.RefSeries) {
if !c.withWaitGroup {
return
}
c.mtx.Lock()
defer c.mtx.Unlock()
c.expectedFloatHistograms = map[string][]prompb.Histogram{}
c.receivedFloatHistograms = map[string][]prompb.Histogram{}
for _, fh := range fhs {
seriesName := getSeriesNameFromRef(series[fh.Ref])
c.expectedFloatHistograms[seriesName] = append(c.expectedFloatHistograms[seriesName], FloatHistogramToHistogramProto(fh.T, fh.FH))
}
c.wg.Add(len(fhs))
}
func (c *TestWriteClient) waitForExpectedData(tb testing.TB) {
if !c.withWaitGroup {
return
@@ -720,6 +764,9 @@ func (c *TestWriteClient) waitForExpectedData(tb testing.TB) {
for ts, expectedHistogram := range c.expectedHistograms {
require.Equal(tb, expectedHistogram, c.receivedHistograms[ts], ts)
}
for ts, expectedFloatHistogram := range c.expectedFloatHistograms {
require.Equal(tb, expectedFloatHistogram, c.receivedFloatHistograms[ts], ts)
}
}
func (c *TestWriteClient) Store(_ context.Context, req []byte) error {
@@ -755,7 +802,12 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte) error {
for _, histogram := range ts.Histograms {
count++
c.receivedHistograms[seriesName] = append(c.receivedHistograms[seriesName], histogram)
if histogram.GetCountFloat() > 0 || histogram.GetZeroCountFloat() > 0 {
c.receivedFloatHistograms[seriesName] = append(c.receivedFloatHistograms[seriesName], histogram)
} else {
c.receivedHistograms[seriesName] = append(c.receivedHistograms[seriesName], histogram)
}
}
}
if c.withWaitGroup {


@@ -15,6 +15,7 @@ package remote
import (
"bytes"
"errors"
"io"
"net/http"
"net/http/httptest"
@@ -163,7 +164,7 @@ func BenchmarkStreamReadEndpoint(b *testing.B) {
for {
res := &prompb.ChunkedReadResponse{}
err := stream.NextProto(res)
if err == io.EOF {
if errors.Is(err, io.EOF) {
break
}
require.NoError(b, err)
@@ -253,7 +254,7 @@ func TestStreamReadEndpoint(t *testing.T) {
for {
res := &prompb.ChunkedReadResponse{}
err := stream.NextProto(res)
if err == io.EOF {
if errors.Is(err, io.EOF) {
break
}
require.NoError(t, err)


@@ -124,16 +124,20 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
}
}
// TODO(codesome): support float histograms.
for _, hp := range ts.Histograms {
hs := HistogramProtoToHistogram(hp)
_, err = app.AppendHistogram(0, labels, hp.Timestamp, hs, nil)
if hp.GetCountFloat() > 0 || hp.GetZeroCountFloat() > 0 { // It is a float histogram.
fhs := HistogramProtoToFloatHistogram(hp)
_, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, fhs)
} else {
hs := HistogramProtoToHistogram(hp)
_, err = app.AppendHistogram(0, labels, hp.Timestamp, hs, nil)
}
if err != nil {
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
// Althogh AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
// Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
// a note indicating its inclusion in the future.
if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)


@@ -21,6 +21,7 @@ import (
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/go-kit/log"
"github.com/stretchr/testify/require"
@@ -31,6 +32,7 @@ import (
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb"
)
func TestRemoteWriteHandler(t *testing.T) {
@@ -66,8 +68,14 @@ func TestRemoteWriteHandler(t *testing.T) {
}
for _, hp := range ts.Histograms {
h := HistogramProtoToHistogram(hp)
require.Equal(t, mockHistogram{labels, hp.Timestamp, h, nil}, appendable.histograms[k])
if hp.GetCountFloat() > 0 || hp.GetZeroCountFloat() > 0 { // It is a float histogram.
fh := HistogramProtoToFloatHistogram(hp)
require.Equal(t, mockHistogram{labels, hp.Timestamp, nil, fh}, appendable.histograms[k])
} else {
h := HistogramProtoToHistogram(hp)
require.Equal(t, mockHistogram{labels, hp.Timestamp, h, nil}, appendable.histograms[k])
}
k++
}
}
@@ -124,7 +132,7 @@ func TestOutOfOrderExemplar(t *testing.T) {
func TestOutOfOrderHistogram(t *testing.T) {
buf, _, err := buildWriteRequest([]prompb.TimeSeries{{
Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}},
Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram)},
Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat())},
}}, nil, nil, nil)
require.NoError(t, err)
@@ -165,6 +173,65 @@ func TestCommitErr(t *testing.T) {
require.Equal(t, "commit error\n", string(body))
}
func BenchmarkRemoteWriteOOOSamples(b *testing.B) {
dir := b.TempDir()
opts := tsdb.DefaultOptions()
opts.OutOfOrderCapMax = 30
opts.OutOfOrderTimeWindow = 120 * time.Minute.Milliseconds()
db, err := tsdb.Open(dir, nil, nil, opts, nil)
require.NoError(b, err)
b.Cleanup(func() {
require.NoError(b, db.Close())
})
handler := NewWriteHandler(log.NewNopLogger(), db.Head())
buf, _, err := buildWriteRequest(genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil)
require.NoError(b, err)
req, err := http.NewRequest("", "", bytes.NewReader(buf))
require.NoError(b, err)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
require.Equal(b, http.StatusNoContent, recorder.Code)
require.Equal(b, db.Head().NumSeries(), uint64(1000))
var bufRequests [][]byte
for i := 0; i < 100; i++ {
buf, _, err = buildWriteRequest(genSeriesWithSample(1000, int64(80+i)*time.Minute.Milliseconds()), nil, nil, nil)
require.NoError(b, err)
bufRequests = append(bufRequests, buf)
}
b.ResetTimer()
for i := 0; i < 100; i++ {
req, err = http.NewRequest("", "", bytes.NewReader(bufRequests[i]))
require.NoError(b, err)
recorder = httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
require.Equal(b, http.StatusNoContent, recorder.Code)
require.Equal(b, db.Head().NumSeries(), uint64(1000))
}
}
func genSeriesWithSample(numSeries int, ts int64) []prompb.TimeSeries {
var series []prompb.TimeSeries
for i := 0; i < numSeries; i++ {
s := prompb.TimeSeries{
Labels: []prompb.Label{{Name: "__name__", Value: fmt.Sprintf("test_metric_%d", i)}},
Samples: []prompb.Sample{{Value: float64(i), Timestamp: ts}},
}
series = append(series, s)
}
return series
}
type mockAppendable struct {
latestSample int64
samples []mockSample


@@ -44,6 +44,11 @@ import (
"github.com/prometheus/prometheus/tsdb/wlog"
)
const (
sampleMetricTypeFloat = "float"
sampleMetricTypeHistogram = "histogram"
)
var ErrUnsupported = errors.New("unsupported operation with WAL-only storage")
// Default values for options.
@@ -96,7 +101,7 @@ type dbMetrics struct {
numActiveSeries prometheus.Gauge
numWALSeriesPendingDeletion prometheus.Gauge
totalAppendedSamples prometheus.Counter
totalAppendedSamples *prometheus.CounterVec
totalAppendedExemplars prometheus.Counter
totalOutOfOrderSamples prometheus.Counter
walTruncateDuration prometheus.Summary
@@ -120,10 +125,10 @@ func newDBMetrics(r prometheus.Registerer) *dbMetrics {
Help: "Number of series pending deletion from the WAL",
})
m.totalAppendedSamples = prometheus.NewCounter(prometheus.CounterOpts{
m.totalAppendedSamples = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "prometheus_agent_samples_appended_total",
Help: "Total number of samples appended to the storage",
})
}, []string{"type"})
m.totalAppendedExemplars = prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_agent_exemplars_appended_total",
@@ -284,10 +289,12 @@ func Open(l log.Logger, reg prometheus.Registerer, rs *remote.Storage, dir strin
db.appenderPool.New = func() interface{} {
return &appender{
DB: db,
pendingSeries: make([]record.RefSeries, 0, 100),
pendingSamples: make([]record.RefSample, 0, 100),
pendingExamplars: make([]record.RefExemplar, 0, 10),
DB: db,
pendingSeries: make([]record.RefSeries, 0, 100),
pendingSamples: make([]record.RefSample, 0, 100),
pendingHistograms: make([]record.RefHistogramSample, 0, 100),
pendingFloatHistograms: make([]record.RefFloatHistogramSample, 0, 100),
pendingExamplars: make([]record.RefExemplar, 0, 10),
}
}
@@ -411,6 +418,16 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
return []record.RefSample{}
},
}
histogramsPool = sync.Pool{
New: func() interface{} {
return []record.RefHistogramSample{}
},
}
floatHistogramsPool = sync.Pool{
New: func() interface{} {
return []record.RefFloatHistogramSample{}
},
}
)
go func() {
@@ -443,6 +460,30 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
return
}
decoded <- samples
case record.HistogramSamples:
histograms := histogramsPool.Get().([]record.RefHistogramSample)[:0]
histograms, err = dec.HistogramSamples(rec, histograms)
if err != nil {
errCh <- &wlog.CorruptionErr{
Err: errors.Wrap(err, "decode histogram samples"),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- histograms
case record.FloatHistogramSamples:
floatHistograms := floatHistogramsPool.Get().([]record.RefFloatHistogramSample)[:0]
floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
if err != nil {
errCh <- &wlog.CorruptionErr{
Err: errors.Wrap(err, "decode float histogram samples"),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- floatHistograms
case record.Tombstones, record.Exemplars:
// We don't care about tombstones or exemplars during replay.
// TODO: If decide to decode exemplars, we should make sure to prepopulate
@@ -496,6 +537,36 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
//nolint:staticcheck
samplesPool.Put(v)
case []record.RefHistogramSample:
for _, entry := range v {
// Update the lastTs for the series based on the sample's timestamp.
ref, ok := multiRef[entry.Ref]
if !ok {
nonExistentSeriesRefs.Inc()
continue
}
series := db.series.GetByID(ref)
if entry.T > series.lastTs {
series.lastTs = entry.T
}
}
//nolint:staticcheck
histogramsPool.Put(v)
case []record.RefFloatHistogramSample:
for _, entry := range v {
// Update the lastTs for the series based on the sample's timestamp.
ref, ok := multiRef[entry.Ref]
if !ok {
nonExistentSeriesRefs.Inc()
continue
}
series := db.series.GetByID(ref)
if entry.T > series.lastTs {
series.lastTs = entry.T
}
}
//nolint:staticcheck
floatHistogramsPool.Put(v)
default:
panic(fmt.Errorf("unexpected decoded type: %T", d))
}
@@ -695,13 +766,23 @@ func (db *DB) Close() error {
type appender struct {
*DB
pendingSeries []record.RefSeries
pendingSamples []record.RefSample
pendingExamplars []record.RefExemplar
pendingSeries []record.RefSeries
pendingSamples []record.RefSample
pendingHistograms []record.RefHistogramSample
pendingFloatHistograms []record.RefFloatHistogramSample
pendingExamplars []record.RefExemplar
// Pointers to the series referenced by each element of pendingSamples.
// Series lock is not held on elements.
sampleSeries []*memSeries
// Pointers to the series referenced by each element of pendingHistograms.
// Series lock is not held on elements.
histogramSeries []*memSeries
// Pointers to the series referenced by each element of pendingFloatHistograms.
// Series lock is not held on elements.
floatHistogramSeries []*memSeries
}
func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
@@ -749,7 +830,7 @@ func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v flo
})
a.sampleSeries = append(a.sampleSeries, series)
a.metrics.totalAppendedSamples.Inc()
a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
return storage.SeriesRef(series.ref), nil
}
@@ -821,8 +902,74 @@ func (a *appender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exem
}
func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
// TODO: Add histogram support.
return 0, nil
if h != nil {
if err := tsdb.ValidateHistogram(h); err != nil {
return 0, err
}
}
if fh != nil {
if err := tsdb.ValidateFloatHistogram(fh); err != nil {
return 0, err
}
}
// series references and chunk references are identical for agent mode.
headRef := chunks.HeadSeriesRef(ref)
series := a.series.GetByID(headRef)
if series == nil {
// Ensure no empty or duplicate labels have gotten through. This mirrors the
// equivalent validation code in the TSDB's headAppender.
l = l.WithoutEmpty()
if l.IsEmpty() {
return 0, errors.Wrap(tsdb.ErrInvalidSample, "empty labelset")
}
if lbl, dup := l.HasDuplicateLabelNames(); dup {
return 0, errors.Wrap(tsdb.ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, lbl))
}
var created bool
series, created = a.getOrCreate(l)
if created {
a.pendingSeries = append(a.pendingSeries, record.RefSeries{
Ref: series.ref,
Labels: l,
})
a.metrics.numActiveSeries.Inc()
}
}
series.Lock()
defer series.Unlock()
if t < series.lastTs {
a.metrics.totalOutOfOrderSamples.Inc()
return 0, storage.ErrOutOfOrderSample
}
if h != nil {
// NOTE: always modify pendingHistograms and histogramSeries together
a.pendingHistograms = append(a.pendingHistograms, record.RefHistogramSample{
Ref: series.ref,
T: t,
H: h,
})
a.histogramSeries = append(a.histogramSeries, series)
} else if fh != nil {
// NOTE: always modify pendingFloatHistograms and floatHistogramSeries together
a.pendingFloatHistograms = append(a.pendingFloatHistograms, record.RefFloatHistogramSample{
Ref: series.ref,
T: t,
FH: fh,
})
a.floatHistogramSeries = append(a.floatHistogramSeries, series)
}
a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
return storage.SeriesRef(series.ref), nil
}
func (a *appender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
@@ -854,6 +1001,22 @@ func (a *appender) Commit() error {
buf = buf[:0]
}
if len(a.pendingHistograms) > 0 {
buf = encoder.HistogramSamples(a.pendingHistograms, buf)
if err := a.wal.Log(buf); err != nil {
return err
}
buf = buf[:0]
}
if len(a.pendingFloatHistograms) > 0 {
buf = encoder.FloatHistogramSamples(a.pendingFloatHistograms, buf)
if err := a.wal.Log(buf); err != nil {
return err
}
buf = buf[:0]
}
if len(a.pendingExamplars) > 0 {
buf = encoder.Exemplars(a.pendingExamplars, buf)
if err := a.wal.Log(buf); err != nil {
@@ -869,6 +1032,18 @@
a.metrics.totalOutOfOrderSamples.Inc()
}
}
for i, s := range a.pendingHistograms {
series = a.histogramSeries[i]
if !series.updateTimestamp(s.T) {
a.metrics.totalOutOfOrderSamples.Inc()
}
}
for i, s := range a.pendingFloatHistograms {
series = a.floatHistogramSeries[i]
if !series.updateTimestamp(s.T) {
a.metrics.totalOutOfOrderSamples.Inc()
}
}
//nolint:staticcheck
a.bufPool.Put(buf)
@@ -878,8 +1053,12 @@
func (a *appender) Rollback() error {
a.pendingSeries = a.pendingSeries[:0]
a.pendingSamples = a.pendingSamples[:0]
a.pendingHistograms = a.pendingHistograms[:0]
a.pendingFloatHistograms = a.pendingFloatHistograms[:0]
a.pendingExamplars = a.pendingExamplars[:0]
a.sampleSeries = a.sampleSeries[:0]
a.histogramSeries = a.histogramSeries[:0]
a.floatHistogramSeries = a.floatHistogramSeries[:0]
a.appenderPool.Put(a)
return nil
}


@@ -53,6 +53,14 @@ func TestDB_InvalidSeries(t *testing.T) {
require.ErrorIs(t, err, tsdb.ErrInvalidSample, "should reject duplicate labels")
})
t.Run("Histograms", func(t *testing.T) {
_, err := app.AppendHistogram(0, labels.Labels{}, 0, tsdb.GenerateTestHistograms(1)[0], nil)
require.ErrorIs(t, err, tsdb.ErrInvalidSample, "should reject empty labels")
_, err = app.AppendHistogram(0, labels.FromStrings("a", "1", "a", "2"), 0, tsdb.GenerateTestHistograms(1)[0], nil)
require.ErrorIs(t, err, tsdb.ErrInvalidSample, "should reject duplicate labels")
})
t.Run("Exemplars", func(t *testing.T) {
sRef, err := app.Append(0, labels.FromStrings("a", "1"), 0, 0)
require.NoError(t, err, "should not reject valid series")
@@ -112,6 +120,7 @@ func TestUnsupportedFunctions(t *testing.T) {
func TestCommit(t *testing.T) {
const (
numDatapoints = 1000
numHistograms = 100
numSeries = 8
)
@@ -138,6 +147,30 @@
}
}
lbls = labelsForTest(t.Name()+"_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdb.GenerateTestHistograms(numHistograms)
for i := 0; i < numHistograms; i++ {
_, err := app.AppendHistogram(0, lset, int64(i), histograms[i], nil)
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdb.GenerateTestFloatHistograms(numHistograms)
for i := 0; i < numHistograms; i++ {
_, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i])
require.NoError(t, err)
}
}
require.NoError(t, app.Commit())
require.NoError(t, s.Close())
@ -152,7 +185,7 @@ func TestCommit(t *testing.T) {
r = wlog.NewReader(sr)
dec record.Decoder
walSeriesCount, walSamplesCount, walExemplarsCount int
walSeriesCount, walSamplesCount, walExemplarsCount, walHistogramCount, walFloatHistogramCount int
)
for r.Next() {
rec := r.Record()
@ -169,6 +202,18 @@ func TestCommit(t *testing.T) {
require.NoError(t, err)
walSamplesCount += len(samples)
case record.HistogramSamples:
var histograms []record.RefHistogramSample
histograms, err = dec.HistogramSamples(rec, histograms)
require.NoError(t, err)
walHistogramCount += len(histograms)
case record.FloatHistogramSamples:
var floatHistograms []record.RefFloatHistogramSample
floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
require.NoError(t, err)
walFloatHistogramCount += len(floatHistograms)
case record.Exemplars:
var exemplars []record.RefExemplar
exemplars, err = dec.Exemplars(rec, exemplars)
@ -180,14 +225,17 @@ func TestCommit(t *testing.T) {
}
// Check that the WAL contained the same number of committed series/samples/exemplars/histograms.
require.Equal(t, numSeries, walSeriesCount, "unexpected number of series")
require.Equal(t, numSeries*3, walSeriesCount, "unexpected number of series")
require.Equal(t, numSeries*numDatapoints, walSamplesCount, "unexpected number of samples")
require.Equal(t, numSeries*numDatapoints, walExemplarsCount, "unexpected number of exemplars")
require.Equal(t, numSeries*numHistograms, walHistogramCount, "unexpected number of histograms")
require.Equal(t, numSeries*numHistograms, walFloatHistogramCount, "unexpected number of float histograms")
}
func TestRollback(t *testing.T) {
const (
numDatapoints = 1000
numHistograms = 100
numSeries = 8
)
@ -205,6 +253,30 @@ func TestRollback(t *testing.T) {
}
}
lbls = labelsForTest(t.Name()+"_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdb.GenerateTestHistograms(numHistograms)
for i := 0; i < numHistograms; i++ {
_, err := app.AppendHistogram(0, lset, int64(i), histograms[i], nil)
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdb.GenerateTestFloatHistograms(numHistograms)
for i := 0; i < numHistograms; i++ {
_, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i])
require.NoError(t, err)
}
}
// Do a rollback, which should clear uncommitted data. A follow-up call to
// Commit should persist nothing to the WAL.
require.NoError(t, app.Rollback())
@ -222,7 +294,7 @@ func TestRollback(t *testing.T) {
r = wlog.NewReader(sr)
dec record.Decoder
walSeriesCount, walSamplesCount, walExemplarsCount int
walSeriesCount, walSamplesCount, walHistogramCount, walFloatHistogramCount, walExemplarsCount int
)
for r.Next() {
rec := r.Record()
@ -245,6 +317,18 @@ func TestRollback(t *testing.T) {
require.NoError(t, err)
walExemplarsCount += len(exemplars)
case record.HistogramSamples:
var histograms []record.RefHistogramSample
histograms, err = dec.HistogramSamples(rec, histograms)
require.NoError(t, err)
walHistogramCount += len(histograms)
case record.FloatHistogramSamples:
var floatHistograms []record.RefFloatHistogramSample
floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
require.NoError(t, err)
walFloatHistogramCount += len(floatHistograms)
default:
}
}
@ -253,11 +337,14 @@ func TestRollback(t *testing.T) {
require.Equal(t, 0, walSeriesCount, "series should not have been written to WAL")
require.Equal(t, 0, walSamplesCount, "samples should not have been written to WAL")
require.Equal(t, 0, walExemplarsCount, "exemplars should not have been written to WAL")
require.Equal(t, 0, walHistogramCount, "histograms should not have been written to WAL")
require.Equal(t, 0, walFloatHistogramCount, "float histograms should not have been written to WAL")
}
func TestFullTruncateWAL(t *testing.T) {
const (
numDatapoints = 1000
numHistograms = 100
numSeries = 800
lastTs = 500
)
@ -283,11 +370,37 @@ func TestFullTruncateWAL(t *testing.T) {
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdb.GenerateTestHistograms(numHistograms)
for i := 0; i < numHistograms; i++ {
_, err := app.AppendHistogram(0, lset, int64(lastTs), histograms[i], nil)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdb.GenerateTestFloatHistograms(numHistograms)
for i := 0; i < numHistograms; i++ {
_, err := app.AppendHistogram(0, lset, int64(lastTs), nil, floatHistograms[i])
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
// Truncate WAL with mint to GC all the samples.
s.truncate(lastTs + 1)
m := gatherFamily(t, reg, "prometheus_agent_deleted_series")
require.Equal(t, float64(numSeries), m.Metric[0].Gauge.GetValue(), "agent wal truncate mismatch of deleted series count")
require.Equal(t, float64(numSeries*3), m.Metric[0].Gauge.GetValue(), "agent wal truncate mismatch of deleted series count")
}
func TestPartialTruncateWAL(t *testing.T) {
@ -319,6 +432,32 @@ func TestPartialTruncateWAL(t *testing.T) {
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_histogram_batch-1", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdb.GenerateTestHistograms(numDatapoints)
for i := 0; i < numDatapoints; i++ {
_, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_float_histogram_batch-1", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdb.GenerateTestFloatHistograms(numDatapoints)
for i := 0; i < numDatapoints; i++ {
_, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i])
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
// Create a second batch of 800 series, each with 1000 data points and a fixed lastTs of 600.
lastTs = 600
lbls = labelsForTest(t.Name()+"batch-2", numSeries)
@ -332,16 +471,43 @@ func TestPartialTruncateWAL(t *testing.T) {
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_histogram_batch-2", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdb.GenerateTestHistograms(numDatapoints)
for i := 0; i < numDatapoints; i++ {
_, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
lbls = labelsForTest(t.Name()+"_float_histogram_batch-2", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdb.GenerateTestFloatHistograms(numDatapoints)
for i := 0; i < numDatapoints; i++ {
_, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i])
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
// Truncate the WAL with mint to GC only the first batch (800 series per sample type) while retaining the second batch.
s.truncate(lastTs - 1)
m := gatherFamily(t, reg, "prometheus_agent_deleted_series")
require.Equal(t, m.Metric[0].Gauge.GetValue(), float64(numSeries), "agent wal truncate mismatch of deleted series count")
require.Equal(t, float64(numSeries*3), m.Metric[0].Gauge.GetValue(), "agent wal truncate mismatch of deleted series count")
}
func TestWALReplay(t *testing.T) {
const (
numDatapoints = 1000
numHistograms = 100
numSeries = 8
lastTs = 500
)
@ -359,6 +525,30 @@ func TestWALReplay(t *testing.T) {
}
}
lbls = labelsForTest(t.Name()+"_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdb.GenerateTestHistograms(numHistograms)
for i := 0; i < numHistograms; i++ {
_, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil)
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdb.GenerateTestFloatHistograms(numHistograms)
for i := 0; i < numHistograms; i++ {
_, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i])
require.NoError(t, err)
}
}
require.NoError(t, app.Commit())
require.NoError(t, s.Close())
@ -377,7 +567,7 @@ func TestWALReplay(t *testing.T) {
// Check if all the series are retrieved back from the WAL.
m := gatherFamily(t, reg, "prometheus_agent_active_series")
require.Equal(t, float64(numSeries), m.Metric[0].Gauge.GetValue(), "agent wal replay mismatch of active series count")
require.Equal(t, float64(numSeries*3), m.Metric[0].Gauge.GetValue(), "agent wal replay mismatch of active series count")
// Check if lastTs of the samples retrieved from the WAL is retained.
metrics := replayStorage.series.series
@ -430,6 +620,15 @@ func Test_ExistingWAL_NextRef(t *testing.T) {
_, err := app.Append(0, lset, 0, 100)
require.NoError(t, err)
}
histogramCount := 10
histograms := tsdb.GenerateTestHistograms(histogramCount)
// Append <histogramCount> histogram series.
for i := 0; i < histogramCount; i++ {
lset := labels.FromStrings(model.MetricNameLabel, fmt.Sprintf("histogram_%d", i))
_, err := app.AppendHistogram(0, lset, 0, histograms[i], nil)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
// Truncate the WAL to force creation of a new segment.
@ -441,7 +640,7 @@ func Test_ExistingWAL_NextRef(t *testing.T) {
require.NoError(t, err)
defer require.NoError(t, db.Close())
require.Equal(t, uint64(seriesCount), db.nextRef.Load(), "nextRef should be equal to the number of series written across the entire WAL")
require.Equal(t, uint64(seriesCount+histogramCount), db.nextRef.Load(), "nextRef should be equal to the number of series written across the entire WAL")
}
func Test_validateOptions(t *testing.T) {

View file

@ -72,7 +72,7 @@ type IndexReader interface {
// Postings returns the postings list iterator for the label pairs.
// The Postings here contain the offsets to the series inside the index.
// Found IDs are not strictly required to point to a valid Series, e.g.
// during background garbage collections. Input values must be sorted.
// during background garbage collections.
Postings(name string, values ...string) (index.Postings, error)
// PostingsForMatchers assembles a single postings iterator based on the given matchers.

View file

@ -174,6 +174,7 @@ func newFloatHistogramIterator(b []byte) *floatHistogramIterator {
// The first 3 bytes contain chunk headers.
// We skip that for actual samples.
_, _ = it.br.readBits(24)
it.counterResetHeader = CounterResetHeader(b[2] & 0b11000000)
return it
}
@ -196,6 +197,14 @@ type FloatHistogramAppender struct {
pBuckets, nBuckets []xorValue
}
func (a *FloatHistogramAppender) GetCounterResetHeader() CounterResetHeader {
return CounterResetHeader(a.b.bytes()[2] & 0b11000000)
}
func (a *FloatHistogramAppender) NumSamples() int {
return int(binary.BigEndian.Uint16(a.b.bytes()))
}
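Both helpers read the fixed three-byte chunk header: bytes 0-1 hold the sample count as a big-endian uint16, and the top two bits of byte 2 carry the counter-reset header. A self-contained sketch of that decoding (the header bytes are made up for illustration):
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// [count hi][count lo][rr......]: rr = counter-reset header bits.
	hdr := []byte{0x00, 0x2a, 0b10_000000}
	numSamples := binary.BigEndian.Uint16(hdr) // 42
	resetHeader := hdr[2] & 0b11000000         // 0b10000000
	fmt.Println(numSamples, resetHeader)       // 42 128
}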
// Append implements Appender. This implementation panics because normal float
// samples must never be appended to a histogram chunk.
func (a *FloatHistogramAppender) Append(int64, float64) {
@ -211,19 +220,14 @@ func (a *FloatHistogramAppender) AppendHistogram(int64, *histogram.Histogram) {
// Appendable returns whether the chunk can be appended to, and if so
// whether any recoding needs to happen using the provided interjections
// (in case of any new buckets, positive or negative range, respectively).
// If the sample is a gauge histogram, AppendableGauge must be used instead.
//
// The chunk is not appendable in the following cases:
//
// • The schema has changed.
//
// • The threshold for the zero bucket has changed.
//
// • Any buckets have disappeared.
//
// • There was a counter reset in the count of observations or in any bucket,
// including the zero bucket.
//
// • The last sample in the chunk was stale while the current sample is not stale.
// - The schema has changed.
// - The threshold for the zero bucket has changed.
// - Any buckets have disappeared.
// - There was a counter reset in the count of observations or in any bucket, including the zero bucket.
// - The last sample in the chunk was stale while the current sample is not stale.
//
// The method returns an additional boolean set to true if it is not appendable
// because of a counter reset. If the given sample is stale, it is always ok to
@ -232,6 +236,9 @@ func (a *FloatHistogramAppender) Appendable(h *histogram.FloatHistogram) (
positiveInterjections, negativeInterjections []Interjection,
okToAppend, counterReset bool,
) {
if a.NumSamples() > 0 && a.GetCounterResetHeader() == GaugeType {
return
}
if value.IsStaleNaN(h.Sum) {
// This is a stale sample whose buckets and spans don't matter.
okToAppend = true
@ -260,12 +267,12 @@ func (a *FloatHistogramAppender) Appendable(h *histogram.FloatHistogram) (
}
var ok bool
positiveInterjections, ok = compareSpans(a.pSpans, h.PositiveSpans)
positiveInterjections, ok = forwardCompareSpans(a.pSpans, h.PositiveSpans)
if !ok {
counterReset = true
return
}
negativeInterjections, ok = compareSpans(a.nSpans, h.NegativeSpans)
negativeInterjections, ok = forwardCompareSpans(a.nSpans, h.NegativeSpans)
if !ok {
counterReset = true
return
@ -281,6 +288,49 @@ func (a *FloatHistogramAppender) Appendable(h *histogram.FloatHistogram) (
return
}
// AppendableGauge returns whether the chunk can be appended to, and if so
// whether:
// 1. Any recoding needs to happen to the chunk using the provided interjections
// (in case of any new buckets, positive or negative range, respectively).
// 2. Any recoding needs to happen for the histogram being appended, using the backward interjections
// (in case of any missing buckets, positive or negative range, respectively).
//
// This method must only be used for gauge histograms.
//
// The chunk is not appendable in the following cases:
// - The schema has changed.
// - The threshold for the zero bucket has changed.
// - The last sample in the chunk was stale while the current sample is not stale.
func (a *FloatHistogramAppender) AppendableGauge(h *histogram.FloatHistogram) (
positiveInterjections, negativeInterjections []Interjection,
backwardPositiveInterjections, backwardNegativeInterjections []Interjection,
positiveSpans, negativeSpans []histogram.Span,
okToAppend bool,
) {
if a.NumSamples() > 0 && a.GetCounterResetHeader() != GaugeType {
return
}
if value.IsStaleNaN(h.Sum) {
// This is a stale sample whose buckets and spans don't matter.
okToAppend = true
return
}
if value.IsStaleNaN(a.sum.value) {
// If the last sample was stale, then we can only accept stale
// samples in this chunk.
return
}
if h.Schema != a.schema || h.ZeroThreshold != a.zThreshold {
return
}
positiveInterjections, backwardPositiveInterjections, positiveSpans = bidirectionalCompareSpans(a.pSpans, h.PositiveSpans)
negativeInterjections, backwardNegativeInterjections, negativeSpans = bidirectionalCompareSpans(a.nSpans, h.NegativeSpans)
okToAppend = true
return
}
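A sketch of how a caller is expected to act on these return values, mirroring the head appender changes later in this commit (fh and app are assumed to be the incoming float histogram and the current *FloatHistogramAppender): forward interjections mean the chunk must be recoded, backward interjections mean the incoming histogram must be recoded first.
pFwd, nFwd, pBwd, nBwd, pSpans, nSpans, ok := app.AppendableGauge(fh)
switch {
case !ok:
	// Cut a new chunk (schema/zero-threshold change, or staleness mismatch).
default:
	if len(pBwd)+len(nBwd) > 0 {
		// The histogram lacks buckets the chunk already has: adopt the
		// merged span layout and inject empty buckets into fh.
		fh.PositiveSpans, fh.NegativeSpans = pSpans, nSpans
		app.RecodeHistogramm(fh, pBwd, nBwd)
	}
	if len(pFwd)+len(nFwd) > 0 {
		// The chunk lacks buckets the histogram has: recode the chunk
		// via Recode before appending.
	}
}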
// counterResetInAnyFloatBucket returns true if there was a counter reset for any
// bucket. This should be called only when the bucket layout is the same or new
// buckets were added. It does not handle the case of buckets missing.
@ -502,11 +552,29 @@ func (a *FloatHistogramAppender) Recode(
return hc, app
}
// RecodeHistogramm converts the given float histogram (in place) to accommodate an expansion of the set of
// (positive and/or negative) buckets used.
func (a *FloatHistogramAppender) RecodeHistogramm(
fh *histogram.FloatHistogram,
pBackwardInter, nBackwardInter []Interjection,
) {
if len(pBackwardInter) > 0 {
numPositiveBuckets := countSpans(fh.PositiveSpans)
fh.PositiveBuckets = interject(fh.PositiveBuckets, make([]float64, numPositiveBuckets), pBackwardInter, false)
}
if len(nBackwardInter) > 0 {
numNegativeBuckets := countSpans(fh.NegativeSpans)
fh.NegativeBuckets = interject(fh.NegativeBuckets, make([]float64, numNegativeBuckets), nBackwardInter, false)
}
}
type floatHistogramIterator struct {
br bstreamReader
numTotal uint16
numRead uint16
counterResetHeader CounterResetHeader
// Layout:
schema int32
zThreshold float64
@ -559,16 +627,21 @@ func (it *floatHistogramIterator) AtFloatHistogram() (int64, *histogram.FloatHis
return it.t, &histogram.FloatHistogram{Sum: it.sum.value}
}
it.atFloatHistogramCalled = true
crHint := histogram.UnknownCounterReset
if it.counterResetHeader == GaugeType {
crHint = histogram.GaugeType
}
return it.t, &histogram.FloatHistogram{
Count: it.cnt.value,
ZeroCount: it.zCnt.value,
Sum: it.sum.value,
ZeroThreshold: it.zThreshold,
Schema: it.schema,
PositiveSpans: it.pSpans,
NegativeSpans: it.nSpans,
PositiveBuckets: it.pBuckets,
NegativeBuckets: it.nBuckets,
CounterResetHint: crHint,
Count: it.cnt.value,
ZeroCount: it.zCnt.value,
Sum: it.sum.value,
ZeroThreshold: it.zThreshold,
Schema: it.schema,
PositiveSpans: it.pSpans,
NegativeSpans: it.nSpans,
PositiveBuckets: it.pBuckets,
NegativeBuckets: it.nBuckets,
}
}
@ -587,6 +660,8 @@ func (it *floatHistogramIterator) Reset(b []byte) {
it.numTotal = binary.BigEndian.Uint16(b)
it.numRead = 0
it.counterResetHeader = CounterResetHeader(b[2] & 0b11000000)
it.t, it.tDelta = 0, 0
it.cnt, it.zCnt, it.sum = xorValue{}, xorValue{}, xorValue{}

View file

@ -358,3 +358,171 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
require.True(t, cr)
}
}
func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
c := Chunk(NewFloatHistogramChunk())
// Create fresh appender and add the first histogram.
app, err := c.Appender()
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
ts := int64(1234567890)
h1 := &histogram.FloatHistogram{
Count: 5,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1},
}
app.AppendFloatHistogram(ts, h1.Copy())
require.Equal(t, 1, c.NumSamples())
c.(*FloatHistogramChunk).SetCounterResetHeader(GaugeType)
{ // Schema change.
h2 := h1.Copy()
h2.Schema++
hApp, _ := app.(*FloatHistogramAppender)
_, _, _, _, _, _, ok := hApp.AppendableGauge(h2)
require.False(t, ok)
}
{ // Zero threshold change.
h2 := h1.Copy()
h2.ZeroThreshold += 0.1
hApp, _ := app.(*FloatHistogramAppender)
_, _, _, _, _, _, ok := hApp.AppendableGauge(h2)
require.False(t, ok)
}
{ // New histogram that has more buckets.
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
}
h2.Count += 9
h2.ZeroCount++
h2.Sum = 30
h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1}
hApp, _ := app.(*FloatHistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
}
{ // New histogram that has buckets missing.
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 1},
{Offset: 4, Length: 1},
{Offset: 1, Length: 1},
}
h2.Count -= 4
h2.Sum--
h2.PositiveBuckets = []float64{6, 3, 3, 2, 5, 1}
hApp, _ := app.(*FloatHistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
require.Len(t, pI, 0)
require.Len(t, nI, 0)
require.Greater(t, len(pBackwardI), 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
}
{ // New histogram that has a bucket missing and new buckets.
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 5, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
}
h2.Sum = 21
h2.PositiveBuckets = []float64{6, 3, 2, 4, 5, 1}
hApp, _ := app.(*FloatHistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Greater(t, len(pBackwardI), 0)
require.Len(t, nI, 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
}
{ // New histogram that has a counter reset while buckets are same.
h2 := h1.Copy()
h2.Sum = 23
h2.PositiveBuckets = []float64{6, 2, 3, 2, 4, 5, 1}
hApp, _ := app.(*FloatHistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
require.Len(t, pI, 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
}
{ // New histogram that has a counter reset while new buckets were added.
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
}
h2.Sum = 29
h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 0}
hApp, _ := app.(*FloatHistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
}
{
// New histogram that has a counter reset while new buckets were
// added before the first bucket and reset on first bucket.
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: -3, Length: 2},
{Offset: 1, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
}
h2.Sum = 26
h2.PositiveBuckets = []float64{1, 2, 5, 3, 3, 2, 4, 5, 1}
hApp, _ := app.(*FloatHistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
}
}

View file

@ -177,6 +177,7 @@ func newHistogramIterator(b []byte) *histogramIterator {
// The first 3 bytes contain chunk headers.
// We skip that for actual samples.
_, _ = it.br.readBits(24)
it.counterResetHeader = CounterResetHeader(b[2] & 0b11000000)
return it
}
@ -222,6 +223,14 @@ type HistogramAppender struct {
trailing uint8
}
func (a *HistogramAppender) GetCounterResetHeader() CounterResetHeader {
return CounterResetHeader(a.b.bytes()[2] & 0b11000000)
}
func (a *HistogramAppender) NumSamples() int {
return int(binary.BigEndian.Uint16(a.b.bytes()))
}
// Append implements Appender. This implementation panics because normal float
// samples must never be appended to a histogram chunk.
func (a *HistogramAppender) Append(int64, float64) {
@ -237,19 +246,16 @@ func (a *HistogramAppender) AppendFloatHistogram(int64, *histogram.FloatHistogra
// Appendable returns whether the chunk can be appended to, and if so
// whether any recoding needs to happen using the provided interjections
// (in case of any new buckets, positive or negative range, respectively).
// If the sample is a gauge histogram, AppendableGauge must be used instead.
//
// The chunk is not appendable in the following cases:
//
// • The schema has changed.
//
// • The threshold for the zero bucket has changed.
//
// • Any buckets have disappeared.
//
// • There was a counter reset in the count of observations or in any bucket,
// including the zero bucket.
//
// • The last sample in the chunk was stale while the current sample is not stale.
// - The schema has changed.
// - The threshold for the zero bucket has changed.
// - Any buckets have disappeared.
// - There was a counter reset in the count of observations or in any bucket,
// including the zero bucket.
// - The last sample in the chunk was stale while the current sample is not stale.
//
// The method returns an additional boolean set to true if it is not appendable
// because of a counter reset. If the given sample is stale, it is always ok to
@ -258,6 +264,9 @@ func (a *HistogramAppender) Appendable(h *histogram.Histogram) (
positiveInterjections, negativeInterjections []Interjection,
okToAppend, counterReset bool,
) {
if a.NumSamples() > 0 && a.GetCounterResetHeader() == GaugeType {
return
}
if value.IsStaleNaN(h.Sum) {
// This is a stale sample whose buckets and spans don't matter.
okToAppend = true
@ -286,12 +295,12 @@ func (a *HistogramAppender) Appendable(h *histogram.Histogram) (
}
var ok bool
positiveInterjections, ok = compareSpans(a.pSpans, h.PositiveSpans)
positiveInterjections, ok = forwardCompareSpans(a.pSpans, h.PositiveSpans)
if !ok {
counterReset = true
return
}
negativeInterjections, ok = compareSpans(a.nSpans, h.NegativeSpans)
negativeInterjections, ok = forwardCompareSpans(a.nSpans, h.NegativeSpans)
if !ok {
counterReset = true
return
@ -307,8 +316,47 @@ func (a *HistogramAppender) Appendable(h *histogram.Histogram) (
return
}
type bucketValue interface {
int64 | float64
// AppendableGauge returns whether the chunk can be appended to, and if so
// whether:
// 1. Any recoding needs to happen to the chunk using the provided interjections
// (in case of any new buckets, positive or negative range, respectively).
// 2. Any recoding needs to happen for the histogram being appended, using the backward interjections
// (in case of any missing buckets, positive or negative range, respectively).
//
// This method must only be used for gauge histograms.
//
// The chunk is not appendable in the following cases:
// - The schema has changed.
// - The threshold for the zero bucket has changed.
// - The last sample in the chunk was stale while the current sample is not stale.
func (a *HistogramAppender) AppendableGauge(h *histogram.Histogram) (
positiveInterjections, negativeInterjections []Interjection,
backwardPositiveInterjections, backwardNegativeInterjections []Interjection,
positiveSpans, negativeSpans []histogram.Span,
okToAppend bool,
) {
if a.NumSamples() > 0 && a.GetCounterResetHeader() != GaugeType {
return
}
if value.IsStaleNaN(h.Sum) {
// This is a stale sample whose buckets and spans don't matter.
okToAppend = true
return
}
if value.IsStaleNaN(a.sum) {
// If the last sample was stale, then we can only accept stale
// samples in this chunk.
return
}
if h.Schema != a.schema || h.ZeroThreshold != a.zThreshold {
return
}
positiveInterjections, backwardPositiveInterjections, positiveSpans = bidirectionalCompareSpans(a.pSpans, h.PositiveSpans)
negativeInterjections, backwardNegativeInterjections, negativeSpans = bidirectionalCompareSpans(a.nSpans, h.NegativeSpans)
okToAppend = true
return
}
// counterResetInAnyBucket returns true if there was a counter reset for any
@ -542,6 +590,22 @@ func (a *HistogramAppender) Recode(
return hc, app
}
// RecodeHistogramm converts the given histogram (in place) to accommodate an expansion of the set of
// (positive and/or negative) buckets used.
func (a *HistogramAppender) RecodeHistogramm(
h *histogram.Histogram,
pBackwardInter, nBackwardInter []Interjection,
) {
if len(pBackwardInter) > 0 {
numPositiveBuckets := countSpans(h.PositiveSpans)
h.PositiveBuckets = interject(h.PositiveBuckets, make([]int64, numPositiveBuckets), pBackwardInter, true)
}
if len(nBackwardInter) > 0 {
numNegativeBuckets := countSpans(h.NegativeSpans)
h.NegativeBuckets = interject(h.NegativeBuckets, make([]int64, numNegativeBuckets), nBackwardInter, true)
}
}
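Integer histogram buckets are stored as deltas, hence deltas=true above. A small worked example of what injecting one empty bucket does to a delta sequence (behavior as exercised by the interjection tests below):
// Absolute bucket values {6, 3} are stored as deltas [6, -3]. Injecting
// an empty bucket between them yields values {6, 0, 3}, i.e. deltas
// [6, -6, 3]: the zero has to be threaded through the delta chain.
in := []int64{6, -3}
out := make([]int64, 3)
interject(in, out, []Interjection{{pos: 1, num: 1}}, true)
// out is now [6, -6, 3]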
func (a *HistogramAppender) writeSumDelta(v float64) {
xorWrite(a.b, v, a.sum, &a.leading, &a.trailing)
}
@ -551,6 +615,8 @@ type histogramIterator struct {
numTotal uint16
numRead uint16
counterResetHeader CounterResetHeader
// Layout:
schema int32
zThreshold float64
@ -599,16 +665,21 @@ func (it *histogramIterator) AtHistogram() (int64, *histogram.Histogram) {
return it.t, &histogram.Histogram{Sum: it.sum}
}
it.atHistogramCalled = true
crHint := histogram.UnknownCounterReset
if it.counterResetHeader == GaugeType {
crHint = histogram.GaugeType
}
return it.t, &histogram.Histogram{
Count: it.cnt,
ZeroCount: it.zCnt,
Sum: it.sum,
ZeroThreshold: it.zThreshold,
Schema: it.schema,
PositiveSpans: it.pSpans,
NegativeSpans: it.nSpans,
PositiveBuckets: it.pBuckets,
NegativeBuckets: it.nBuckets,
CounterResetHint: crHint,
Count: it.cnt,
ZeroCount: it.zCnt,
Sum: it.sum,
ZeroThreshold: it.zThreshold,
Schema: it.schema,
PositiveSpans: it.pSpans,
NegativeSpans: it.nSpans,
PositiveBuckets: it.pBuckets,
NegativeBuckets: it.nBuckets,
}
}
@ -617,16 +688,21 @@ func (it *histogramIterator) AtFloatHistogram() (int64, *histogram.FloatHistogra
return it.t, &histogram.FloatHistogram{Sum: it.sum}
}
it.atFloatHistogramCalled = true
crHint := histogram.UnknownCounterReset
if it.counterResetHeader == GaugeType {
crHint = histogram.GaugeType
}
return it.t, &histogram.FloatHistogram{
Count: float64(it.cnt),
ZeroCount: float64(it.zCnt),
Sum: it.sum,
ZeroThreshold: it.zThreshold,
Schema: it.schema,
PositiveSpans: it.pSpans,
NegativeSpans: it.nSpans,
PositiveBuckets: it.pFloatBuckets,
NegativeBuckets: it.nFloatBuckets,
CounterResetHint: crHint,
Count: float64(it.cnt),
ZeroCount: float64(it.zCnt),
Sum: it.sum,
ZeroThreshold: it.zThreshold,
Schema: it.schema,
PositiveSpans: it.pSpans,
NegativeSpans: it.nSpans,
PositiveBuckets: it.pFloatBuckets,
NegativeBuckets: it.nFloatBuckets,
}
}
@ -645,6 +721,8 @@ func (it *histogramIterator) Reset(b []byte) {
it.numTotal = binary.BigEndian.Uint16(b)
it.numRead = 0
it.counterResetHeader = CounterResetHeader(b[2] & 0b11000000)
it.t, it.cnt, it.zCnt = 0, 0, 0
it.tDelta, it.cntDelta, it.zCntDelta = 0, 0, 0

View file

@ -165,21 +165,23 @@ func (b *bucketIterator) Next() (int, bool) {
if b.span >= len(b.spans) {
return 0, false
}
try:
if b.bucket < int(b.spans[b.span].Length-1) { // Try to move within same span.
if b.bucket < int(b.spans[b.span].Length)-1 { // Try to move within same span.
b.bucket++
b.idx++
return b.idx, true
} else if b.span < len(b.spans)-1 { // Try to move from one span to the next.
}
for b.span < len(b.spans)-1 { // Try to move from one span to the next.
b.span++
b.idx += int(b.spans[b.span].Offset + 1)
b.bucket = 0
if b.spans[b.span].Length == 0 {
// Pathological case that should never happen. We can't use this span, let's try again.
goto try
b.idx--
continue
}
return b.idx, true
}
// We're out of options.
return 0, false
}
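The iterator flattens the span encoding into absolute bucket indices. A standalone re-implementation of that decoding for illustration (not the package API, which keeps bucketIterator unexported):
package main

import "fmt"

type span struct{ offset, length int32 }

// expand lists the absolute bucket indices a span layout covers,
// matching what bucketIterator yields one index at a time.
func expand(spans []span) []int {
	idx, out := -1, []int{}
	for _, s := range spans {
		idx += int(s.offset) // gap before this span (absolute for the first)
		for i := int32(0); i < s.length; i++ {
			idx++
			out = append(out, idx)
		}
	}
	return out
}

func main() {
	fmt.Println(expand([]span{{0, 2}, {1, 2}})) // [0 1 3 4]
}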
@ -191,7 +193,7 @@ type Interjection struct {
num int
}
// compareSpans returns the interjections to convert a slice of deltas to a new
// forwardCompareSpans returns the interjections to convert a slice of deltas to a new
// slice representing an expanded set of buckets, or false if incompatible
// (e.g. if buckets were removed).
//
@ -226,11 +228,11 @@ type Interjection struct {
// match a new span layout that adds buckets, we simply need to generate a list
// of interjections.
//
// Note: Within compareSpans we don't have to worry about the changes to the
// Note: Within forwardCompareSpans we don't have to worry about the changes to the
// spans themselves; thanks to the iterators, we get to work with the more useful
// bucket indices (which of course directly correspond to the buckets we have to
// adjust).
func compareSpans(a, b []histogram.Span) ([]Interjection, bool) {
func forwardCompareSpans(a, b []histogram.Span) (forward []Interjection, ok bool) {
ai := newBucketIterator(a)
bi := newBucketIterator(b)
@ -278,6 +280,106 @@ loop:
return interjections, true
}
// bidirectionalCompareSpans does everything that forwardCompareSpans does and
// also returns interjections in the other direction (i.e. buckets present in a
// that are missing in b).
func bidirectionalCompareSpans(a, b []histogram.Span) (forward, backward []Interjection, mergedSpans []histogram.Span) {
ai := newBucketIterator(a)
bi := newBucketIterator(b)
var interjections, bInterjections []Interjection
var lastBucket int
addBucket := func(b int) {
offset := b - lastBucket - 1
if offset == 0 && len(mergedSpans) > 0 {
mergedSpans[len(mergedSpans)-1].Length++
} else {
if len(mergedSpans) == 0 {
offset++
}
mergedSpans = append(mergedSpans, histogram.Span{
Offset: int32(offset),
Length: 1,
})
}
lastBucket = b
}
// When inter.num becomes > 0, this becomes a valid interjection that
// should be yielded when we finish a streak of new buckets.
var inter, bInter Interjection
av, aOK := ai.Next()
bv, bOK := bi.Next()
loop:
for {
switch {
case aOK && bOK:
switch {
case av == bv: // Both have an identical value; move on.
// Finish WIP interjection and reset.
if inter.num > 0 {
interjections = append(interjections, inter)
inter.num = 0
}
if bInter.num > 0 {
bInterjections = append(bInterjections, bInter)
bInter.num = 0
}
addBucket(av)
av, aOK = ai.Next()
bv, bOK = bi.Next()
inter.pos++
bInter.pos++
case av < bv: // b misses a value that is in a.
bInter.num++
// Collect the forward interjection before advancing the
// position of 'a'.
if inter.num > 0 {
interjections = append(interjections, inter)
inter.num = 0
}
addBucket(av)
inter.pos++
av, aOK = ai.Next()
case av > bv: // a misses a value that is in b. Forward b and recompare.
inter.num++
// Collect the backward interjection before advancing the
// position of 'b'.
if bInter.num > 0 {
bInterjections = append(bInterjections, bInter)
bInter.num = 0
}
addBucket(bv)
bInter.pos++
bv, bOK = bi.Next()
}
case aOK && !bOK: // b misses a value that is in a.
bInter.num++
addBucket(av)
av, aOK = ai.Next()
case !aOK && bOK: // a misses a value that is in b. Forward b and recompare.
inter.num++
addBucket(bv)
bv, bOK = bi.Next()
default: // Both iterators ran out. We're done.
if inter.num > 0 {
interjections = append(interjections, inter)
}
if bInter.num > 0 {
bInterjections = append(bInterjections, bInter)
}
break loop
}
}
return interjections, bInterjections, mergedSpans
}
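A worked example (same-package sketch, since the function is unexported): layout a covers buckets {0, 1, 4} and layout b covers {1, 4, 5}.
a := []histogram.Span{{Offset: 0, Length: 2}, {Offset: 2, Length: 1}} // buckets 0, 1, 4
b := []histogram.Span{{Offset: 1, Length: 1}, {Offset: 2, Length: 2}} // buckets 1, 4, 5
fwd, bwd, merged := bidirectionalCompareSpans(a, b)
// fwd    == []Interjection{{pos: 3, num: 1}}  // bucket 5 is new relative to a
// bwd    == []Interjection{{pos: 0, num: 1}}  // bucket 0 is missing from b
// merged == []histogram.Span{{Offset: 0, Length: 2}, {Offset: 2, Length: 2}} // union {0, 1, 4, 5}
_, _, _ = fwd, bwd, merged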
type bucketValue interface {
int64 | float64
}
// interject merges 'in' with the provided interjections and writes them into
// 'out', which must already have the appropriate length.
func interject[BV bucketValue](in, out []BV, interjections []Interjection, deltas bool) []BV {

View file

@ -111,13 +111,12 @@ func TestBucketIterator(t *testing.T) {
}
}
func TestInterjection(t *testing.T) {
func TestCompareSpansAndInterject(t *testing.T) {
scenarios := []struct {
description string
spansA, spansB []histogram.Span
valid bool
interjections []Interjection
bucketsIn, bucketsOut []int64
description string
spansA, spansB []histogram.Span
interjections, backwardInterjections []Interjection
bucketsIn, bucketsOut []int64
}{
{
description: "single prepend at the beginning",
@ -127,7 +126,6 @@ func TestInterjection(t *testing.T) {
spansB: []histogram.Span{
{Offset: -11, Length: 4},
},
valid: true,
interjections: []Interjection{
{
pos: 0,
@ -145,7 +143,6 @@ func TestInterjection(t *testing.T) {
spansB: []histogram.Span{
{Offset: -10, Length: 4},
},
valid: true,
interjections: []Interjection{
{
pos: 3,
@ -163,7 +160,6 @@ func TestInterjection(t *testing.T) {
spansB: []histogram.Span{
{Offset: -12, Length: 5},
},
valid: true,
interjections: []Interjection{
{
pos: 0,
@ -181,7 +177,6 @@ func TestInterjection(t *testing.T) {
spansB: []histogram.Span{
{Offset: -10, Length: 5},
},
valid: true,
interjections: []Interjection{
{
pos: 3,
@ -199,7 +194,6 @@ func TestInterjection(t *testing.T) {
spansB: []histogram.Span{
{Offset: -12, Length: 7},
},
valid: true,
interjections: []Interjection{
{
pos: 0,
@ -221,7 +215,9 @@ func TestInterjection(t *testing.T) {
spansB: []histogram.Span{
{Offset: -9, Length: 3},
},
valid: false,
backwardInterjections: []Interjection{
{pos: 0, num: 1},
},
},
{
description: "single removal of bucket in the middle",
@ -232,7 +228,9 @@ func TestInterjection(t *testing.T) {
{Offset: -10, Length: 2},
{Offset: 1, Length: 1},
},
valid: false,
backwardInterjections: []Interjection{
{pos: 2, num: 1},
},
},
{
description: "single removal of bucket at the end",
@ -242,7 +240,9 @@ func TestInterjection(t *testing.T) {
spansB: []histogram.Span{
{Offset: -10, Length: 3},
},
valid: false,
backwardInterjections: []Interjection{
{pos: 3, num: 1},
},
},
{
description: "as described in doc comment",
@ -259,7 +259,6 @@ func TestInterjection(t *testing.T) {
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
},
valid: true,
interjections: []Interjection{
{
pos: 2,
@ -277,12 +276,67 @@ func TestInterjection(t *testing.T) {
bucketsIn: []int64{6, -3, 0, -1, 2, 1, -4},
bucketsOut: []int64{6, -3, -3, 3, -3, 0, 2, 2, 1, -5, 1},
},
{
description: "both forward and backward interjections, complex case",
spansA: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
spansB: []histogram.Span{
{Offset: 1, Length: 2},
{Offset: 1, Length: 1},
{Offset: 1, Length: 2},
{Offset: 1, Length: 1},
{Offset: 4, Length: 1},
},
interjections: []Interjection{
{
pos: 2,
num: 1,
},
{
pos: 3,
num: 2,
},
{
pos: 6,
num: 1,
},
},
backwardInterjections: []Interjection{
{
pos: 0,
num: 1,
},
{
pos: 5,
num: 1,
},
{
pos: 6,
num: 1,
},
{
pos: 7,
num: 1,
},
},
},
}
for _, s := range scenarios {
t.Run(s.description, func(t *testing.T) {
interjections, valid := compareSpans(s.spansA, s.spansB)
if !s.valid {
if len(s.backwardInterjections) > 0 {
interjections, bInterjections, _ := bidirectionalCompareSpans(s.spansA, s.spansB)
require.Equal(t, s.interjections, interjections)
require.Equal(t, s.backwardInterjections, bInterjections)
}
interjections, valid := forwardCompareSpans(s.spansA, s.spansB)
if len(s.backwardInterjections) > 0 {
require.False(t, valid, "forwardCompareSpans unexpectedly returned true")
return
}
@ -292,6 +346,24 @@ func TestInterjection(t *testing.T) {
gotBuckets := make([]int64, len(s.bucketsOut))
interject(s.bucketsIn, gotBuckets, interjections, true)
require.Equal(t, s.bucketsOut, gotBuckets)
floatBucketsIn := make([]float64, len(s.bucketsIn))
last := s.bucketsIn[0]
floatBucketsIn[0] = float64(last)
for i := 1; i < len(floatBucketsIn); i++ {
last += s.bucketsIn[i]
floatBucketsIn[i] = float64(last)
}
floatBucketsOut := make([]float64, len(s.bucketsOut))
last = s.bucketsOut[0]
floatBucketsOut[0] = float64(last)
for i := 1; i < len(floatBucketsOut); i++ {
last += s.bucketsOut[i]
floatBucketsOut[i] = float64(last)
}
gotFloatBuckets := make([]float64, len(floatBucketsOut))
interject(floatBucketsIn, gotFloatBuckets, interjections, false)
require.Equal(t, floatBucketsOut, gotFloatBuckets)
})
}
}
@ -369,3 +441,135 @@ func TestWriteReadHistogramChunkLayout(t *testing.T) {
require.Equal(t, want.negativeSpans, gotNegativeSpans)
}
}
func TestSpansFromBidirectionalCompareSpans(t *testing.T) {
cases := []struct {
s1, s2, exp []histogram.Span
}{
{ // All empty.
s1: []histogram.Span{},
s2: []histogram.Span{},
},
{ // Same spans.
s1: []histogram.Span{},
s2: []histogram.Span{},
},
{
// Has the cases of
// 1. |----| (partial overlap)
// |----|
//
// 2. |-----| (no gap, but no overlap either)
// |---|
//
// 3. |----| (complete overlap)
// |----|
s1: []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 3, Length: 3},
{Offset: 5, Length: 3},
},
s2: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 2},
{Offset: 2, Length: 3},
{Offset: 3, Length: 3},
},
exp: []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 7},
{Offset: 3, Length: 3},
},
},
{
// s1 is superset of s2.
s1: []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 3, Length: 5},
{Offset: 3, Length: 3},
},
s2: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 5, Length: 3},
{Offset: 4, Length: 3},
},
exp: []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 3, Length: 5},
{Offset: 3, Length: 3},
},
},
{
// No overlaps but one span is side by side.
s1: []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 3, Length: 3},
{Offset: 5, Length: 3},
},
s2: []histogram.Span{
{Offset: 3, Length: 3},
{Offset: 4, Length: 2},
},
exp: []histogram.Span{
{Offset: 0, Length: 9},
{Offset: 1, Length: 2},
{Offset: 2, Length: 3},
},
},
{
// No buckets in one of them.
s1: []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 3, Length: 3},
{Offset: 5, Length: 3},
},
s2: []histogram.Span{},
exp: []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 3, Length: 3},
{Offset: 5, Length: 3},
},
},
{ // Zero length spans.
s1: []histogram.Span{
{Offset: -5, Length: 0},
{Offset: 2, Length: 0},
{Offset: 3, Length: 3},
{Offset: 1, Length: 0},
{Offset: 2, Length: 3},
{Offset: 2, Length: 0},
{Offset: 2, Length: 0},
{Offset: 1, Length: 3},
{Offset: 4, Length: 0},
{Offset: 5, Length: 0},
},
s2: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 2},
{Offset: 1, Length: 0},
{Offset: 1, Length: 3},
{Offset: 3, Length: 3},
},
exp: []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 7},
{Offset: 3, Length: 3},
},
},
}
for _, c := range cases {
s1c := make([]histogram.Span, len(c.s1))
s2c := make([]histogram.Span, len(c.s2))
copy(s1c, c.s1)
copy(s2c, c.s2)
_, _, act := bidirectionalCompareSpans(c.s1, c.s2)
require.Equal(t, c.exp, act)
// Check that s1 and s2 are not modified.
require.Equal(t, s1c, c.s1)
require.Equal(t, s2c, c.s2)
_, _, act = bidirectionalCompareSpans(c.s2, c.s1)
require.Equal(t, c.exp, act)
}
}

View file

@ -517,3 +517,171 @@ func TestAtFloatHistogram(t *testing.T) {
i++
}
}
func TestHistogramChunkAppendableGauge(t *testing.T) {
c := Chunk(NewHistogramChunk())
// Create fresh appender and add the first histogram.
app, err := c.Appender()
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
ts := int64(1234567890)
h1 := &histogram.Histogram{
Count: 5,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // {6, 3, 3, 2, 4, 5, 1}
}
app.AppendHistogram(ts, h1.Copy())
require.Equal(t, 1, c.NumSamples())
c.(*HistogramChunk).SetCounterResetHeader(GaugeType)
{ // Schema change.
h2 := h1.Copy()
h2.Schema++
hApp, _ := app.(*HistogramAppender)
_, _, _, _, _, _, ok := hApp.AppendableGauge(h2)
require.False(t, ok)
}
{ // Zero threshold change.
h2 := h1.Copy()
h2.ZeroThreshold += 0.1
hApp, _ := app.(*HistogramAppender)
_, _, _, _, _, _, ok := hApp.AppendableGauge(h2)
require.False(t, ok)
}
{ // New histogram that has more buckets.
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
}
h2.Count += 9
h2.ZeroCount++
h2.Sum = 30
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // {7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1}
hApp, _ := app.(*HistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
}
{ // New histogram that has buckets missing.
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 1},
{Offset: 4, Length: 1},
{Offset: 1, Length: 1},
}
h2.Count -= 4
h2.Sum--
h2.PositiveBuckets = []int64{6, -3, 0, -1, 3, -4} // {6, 3, 3, 2, 5, 1}
hApp, _ := app.(*HistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
require.Len(t, pI, 0)
require.Len(t, nI, 0)
require.Greater(t, len(pBackwardI), 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
}
{ // New histogram that has a bucket missing and new buckets.
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 5, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
}
h2.Sum = 21
h2.PositiveBuckets = []int64{6, -3, -1, 2, 1, -4} // {6, 3, 2, 4, 5, 1}
hApp, _ := app.(*HistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Greater(t, len(pBackwardI), 0)
require.Len(t, nI, 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
}
{ // New histogram that has a counter reset while buckets are same.
h2 := h1.Copy()
h2.Sum = 23
h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // {6, 2, 3, 2, 4, 5, 1}
hApp, _ := app.(*HistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
require.Len(t, pI, 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
}
{ // New histogram that has a counter reset while new buckets were added.
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
}
h2.Sum = 29
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 0} // {7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 0}
hApp, _ := app.(*HistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
}
{
// New histogram that has a counter reset while new buckets were
// added before the first bucket and reset on first bucket.
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: -3, Length: 2},
{Offset: 1, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
}
h2.Sum = 26
h2.PositiveBuckets = []int64{1, 1, 3, -2, 0, -1, 2, 1, -4} // {1, 2, 5, 3, 3, 2, 4, 5, 1}
hApp, _ := app.(*HistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
}
}

View file

@ -4095,8 +4095,7 @@ func TestOOOCompaction(t *testing.T) {
ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
require.NoError(t, err)
require.False(t, created)
require.Nil(t, ms.oooHeadChunk)
require.Equal(t, 0, len(ms.oooMmappedChunks))
require.Nil(t, ms.ooo)
}
checkEmptyOOOChunk(series1)
checkEmptyOOOChunk(series2)
@ -4138,8 +4137,8 @@ func TestOOOCompaction(t *testing.T) {
ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
require.NoError(t, err)
require.False(t, created)
require.Greater(t, ms.oooHeadChunk.chunk.NumSamples(), 0)
require.Equal(t, 14, len(ms.oooMmappedChunks)) // 7 original, 7 duplicate.
require.Greater(t, ms.ooo.oooHeadChunk.chunk.NumSamples(), 0)
require.Equal(t, 14, len(ms.ooo.oooMmappedChunks)) // 7 original, 7 duplicate.
}
checkNonEmptyOOOChunk(series1)
checkNonEmptyOOOChunk(series2)
@ -4289,7 +4288,7 @@ func TestOOOCompactionWithNormalCompaction(t *testing.T) {
ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
require.NoError(t, err)
require.False(t, created)
require.Greater(t, ms.oooHeadChunk.chunk.NumSamples(), 0)
require.Greater(t, ms.ooo.oooHeadChunk.chunk.NumSamples(), 0)
}
// If the normal Head is not compacted, the OOO head compaction does not take place.
@ -4317,8 +4316,7 @@ func TestOOOCompactionWithNormalCompaction(t *testing.T) {
ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
require.NoError(t, err)
require.False(t, created)
require.Nil(t, ms.oooHeadChunk)
require.Equal(t, 0, len(ms.oooMmappedChunks))
require.Nil(t, ms.ooo)
}
verifySamples := func(block *Block, fromMins, toMins int64) {
@ -4711,8 +4709,7 @@ func TestOOODisabled(t *testing.T) {
require.NoError(t, err)
require.False(t, created)
require.NotNil(t, ms)
require.Nil(t, ms.oooHeadChunk)
require.Len(t, ms.oooMmappedChunks, 0)
require.Nil(t, ms.ooo)
}
func TestWBLAndMmapReplay(t *testing.T) {
@ -4776,7 +4773,7 @@ func TestWBLAndMmapReplay(t *testing.T) {
require.False(t, created)
require.NoError(t, err)
var s1MmapSamples []tsdbutil.Sample
for _, mc := range ms.oooMmappedChunks {
for _, mc := range ms.ooo.oooMmappedChunks {
chk, err := db.head.chunkDiskMapper.Chunk(mc.ref)
require.NoError(t, err)
it := chk.Iterator(nil)
@ -4983,8 +4980,7 @@ func TestOOOCompactionFailure(t *testing.T) {
ms, created, err := db.head.getOrCreate(series1.Hash(), series1)
require.NoError(t, err)
require.False(t, created)
require.Nil(t, ms.oooHeadChunk)
require.Len(t, ms.oooMmappedChunks, 0)
require.Nil(t, ms.ooo)
// The failed compaction should not have left the ooo Head corrupted.
// Hence, expect no new blocks with another OOO compaction call.
@ -5798,7 +5794,7 @@ func TestDiskFillingUpAfterDisablingOOO(t *testing.T) {
db.DisableCompactions()
ms := db.head.series.getByHash(series1.Hash(), series1)
require.Greater(t, len(ms.oooMmappedChunks), 0, "OOO mmap chunk was not replayed")
require.Greater(t, len(ms.ooo.oooMmappedChunks), 0, "OOO mmap chunk was not replayed")
checkMmapFileContents := func(contains, notContains []string) {
mmapDir := mmappedChunksDir(db.head.opts.ChunkDirRoot)
@ -5826,7 +5822,7 @@ func TestDiskFillingUpAfterDisablingOOO(t *testing.T) {
checkMmapFileContents([]string{"000001", "000002"}, nil)
require.NoError(t, db.Compact())
checkMmapFileContents([]string{"000002"}, []string{"000001"})
require.Equal(t, 0, len(ms.oooMmappedChunks), "OOO mmap chunk was not compacted")
require.Nil(t, ms.ooo, "OOO mmap chunk was not compacted")
addSamples(501, 650)
checkMmapFileContents([]string{"000002", "000003"}, []string{"000001"})

View file

@ -17,6 +17,7 @@ import (
"fmt"
"io"
"math"
"math/rand"
"path/filepath"
"sync"
"time"
@ -694,7 +695,7 @@ func (h *Head) Init(minValidTime int64) error {
offset = snapOffset
}
sr, err := wlog.NewSegmentBufReaderWithOffset(offset, s)
if errors.Cause(err) == io.EOF {
if errors.Is(err, io.EOF) {
// File does not exist.
continue
}
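errors.Is walks the whole wrap chain (recent pkg/errors versions implement Unwrap as well), so it matches everything the old errors.Cause comparison did. For instance:
wrapped := fmt.Errorf("read WAL segment: %w", io.EOF)
fmt.Println(errors.Is(wrapped, io.EOF)) // true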
@ -789,7 +790,11 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries)
h.metrics.chunks.Inc()
h.metrics.chunksCreated.Inc()
ms.oooMmappedChunks = append(ms.oooMmappedChunks, &mmappedChunk{
if ms.ooo == nil {
ms.ooo = &memSeriesOOOFields{}
}
ms.ooo.oooMmappedChunks = append(ms.ooo.oooMmappedChunks, &mmappedChunk{
ref: chunkRef,
minTime: mint,
maxTime: maxt,
@ -1692,24 +1697,24 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (
minMmapFile = seq
}
}
if len(series.oooMmappedChunks) > 0 {
seq, _ := series.oooMmappedChunks[0].ref.Unpack()
if series.ooo != nil && len(series.ooo.oooMmappedChunks) > 0 {
seq, _ := series.ooo.oooMmappedChunks[0].ref.Unpack()
if seq < minMmapFile {
minMmapFile = seq
}
for _, ch := range series.oooMmappedChunks {
for _, ch := range series.ooo.oooMmappedChunks {
if ch.minTime < minOOOTime {
minOOOTime = ch.minTime
}
}
}
if series.oooHeadChunk != nil {
if series.oooHeadChunk.minTime < minOOOTime {
minOOOTime = series.oooHeadChunk.minTime
if series.ooo != nil && series.ooo.oooHeadChunk != nil {
if series.ooo.oooHeadChunk.minTime < minOOOTime {
minOOOTime = series.ooo.oooHeadChunk.minTime
}
}
if len(series.mmappedChunks) > 0 || len(series.oooMmappedChunks) > 0 ||
series.headChunk != nil || series.oooHeadChunk != nil || series.pendingCommit {
if len(series.mmappedChunks) > 0 || series.headChunk != nil || series.pendingCommit ||
(series.ooo != nil && (len(series.ooo.oooMmappedChunks) > 0 || series.ooo.oooHeadChunk != nil)) {
seriesMint := series.minTime()
if seriesMint < actualMint {
actualMint = seriesMint
@ -1867,9 +1872,7 @@ type memSeries struct {
headChunk *memChunk // Most recent chunk in memory that's still being built.
firstChunkID chunks.HeadChunkID // HeadChunkID for mmappedChunks[0]
oooMmappedChunks []*mmappedChunk // Immutable chunks on disk containing OOO samples.
oooHeadChunk *oooHeadChunk // Most recent chunk for ooo samples in memory that's still being built.
firstOOOChunkID chunks.HeadChunkID // HeadOOOChunkID for oooMmappedChunks[0]
ooo *memSeriesOOOFields
mmMaxTime int64 // Max time of any mmapped chunk, only used during WAL replay.
@ -1897,6 +1900,14 @@ type memSeries struct {
pendingCommit bool // Whether there are samples waiting to be committed to this series.
}
// memSeriesOOOFields contains the fields required by memSeries
// to handle out-of-order data.
type memSeriesOOOFields struct {
oooMmappedChunks []*mmappedChunk // Immutable chunks on disk containing OOO samples.
oooHeadChunk *oooHeadChunk // Most recent chunk for ooo samples in memory that's still being built.
firstOOOChunkID chunks.HeadChunkID // HeadOOOChunkID for oooMmappedChunks[0].
}
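Grouping the three OOO fields behind one pointer means in-order-only series pay for a single nil pointer instead of three unused fields. The pattern used throughout this commit is allocate on the first OOO sample and reset to nil once everything OOO is gone:
// Write path (see memSeries.insert below): lazily allocate.
if s.ooo == nil {
	s.ooo = &memSeriesOOOFields{}
}
// Truncation path: free once no OOO chunks remain.
if len(s.ooo.oooMmappedChunks) == 0 && s.ooo.oooHeadChunk == nil {
	s.ooo = nil
}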
func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, hash uint64, chunkEndTimeVariance float64, isolationDisabled bool) *memSeries {
s := &memSeries{
lset: lset,
@ -1957,15 +1968,19 @@ func (s *memSeries) truncateChunksBefore(mint int64, minOOOMmapRef chunks.ChunkD
}
var removedOOO int
if len(s.oooMmappedChunks) > 0 {
for i, c := range s.oooMmappedChunks {
if s.ooo != nil && len(s.ooo.oooMmappedChunks) > 0 {
for i, c := range s.ooo.oooMmappedChunks {
if c.ref.GreaterThan(minOOOMmapRef) {
break
}
removedOOO = i + 1
}
s.oooMmappedChunks = append(s.oooMmappedChunks[:0], s.oooMmappedChunks[removedOOO:]...)
s.firstOOOChunkID += chunks.HeadChunkID(removedOOO)
s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks[:0], s.ooo.oooMmappedChunks[removedOOO:]...)
s.ooo.firstOOOChunkID += chunks.HeadChunkID(removedOOO)
if len(s.ooo.oooMmappedChunks) == 0 && s.ooo.oooHeadChunk == nil {
s.ooo = nil
}
}
return removedInOrder + removedOOO
@ -2060,7 +2075,7 @@ func (h *Head) updateWALReplayStatusRead(current int) {
func GenerateTestHistograms(n int) (r []*histogram.Histogram) {
for i := 0; i < n; i++ {
r = append(r, &histogram.Histogram{
Count: 5 + uint64(i*4),
Count: 10 + uint64(i*8),
ZeroCount: 2 + uint64(i),
ZeroThreshold: 0.001,
Sum: 18.4 * float64(i+1),
@ -2070,6 +2085,37 @@ func GenerateTestHistograms(n int) (r []*histogram.Histogram) {
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{int64(i + 1), 1, -1, 0},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
NegativeBuckets: []int64{int64(i + 1), 1, -1, 0},
})
}
return r
}
func GenerateTestGaugeHistograms(n int) (r []*histogram.Histogram) {
for x := 0; x < n; x++ {
i := rand.Intn(n)
r = append(r, &histogram.Histogram{
CounterResetHint: histogram.GaugeType,
Count: 10 + uint64(i*8),
ZeroCount: 2 + uint64(i),
ZeroThreshold: 0.001,
Sum: 18.4 * float64(i+1),
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{int64(i + 1), 1, -1, 0},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
NegativeBuckets: []int64{int64(i + 1), 1, -1, 0},
})
}
@ -2079,7 +2125,7 @@ func GenerateTestHistograms(n int) (r []*histogram.Histogram) {
func GenerateTestFloatHistograms(n int) (r []*histogram.FloatHistogram) {
for i := 0; i < n; i++ {
r = append(r, &histogram.FloatHistogram{
Count: 5 + float64(i*4),
Count: 10 + float64(i*8),
ZeroCount: 2 + float64(i),
ZeroThreshold: 0.001,
Sum: 18.4 * float64(i+1),
@ -2089,6 +2135,37 @@ func GenerateTestFloatHistograms(n int) (r []*histogram.FloatHistogram) {
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
NegativeBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)},
})
}
return r
}
func GenerateTestGaugeFloatHistograms(n int) (r []*histogram.FloatHistogram) {
for x := 0; x < n; x++ {
i := rand.Intn(n)
r = append(r, &histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Count: 10 + float64(i*8),
ZeroCount: 2 + float64(i),
ZeroThreshold: 0.001,
Sum: 18.4 * float64(i+1),
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
NegativeBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)},
})
}

View file

@ -1100,7 +1100,10 @@ func (a *headAppender) Commit() (err error) {
// insert is like append, except it inserts out-of-order samples into the series' OOO head chunk.
func (s *memSeries) insert(t int64, v float64, chunkDiskMapper chunkDiskMapper, oooCapMax int64) (inserted, chunkCreated bool, mmapRef chunks.ChunkDiskMapperRef) {
c := s.oooHeadChunk
if s.ooo == nil {
s.ooo = &memSeriesOOOFields{}
}
c := s.ooo.oooHeadChunk
if c == nil || c.chunk.NumSamples() == int(oooCapMax) {
// Note: If no new samples come in then we rely on compaction to clean up stale in-memory OOO chunks.
c, mmapRef = s.cutNewOOOHeadChunk(t, chunkDiskMapper)
@ -1145,27 +1148,39 @@ func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper
// appendHistogram adds the histogram.
// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
// TODO(codesome): Support gauge histograms here.
func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID uint64, chunkDiskMapper chunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) {
// Head controls the execution of recoding, so that we own the proper
// chunk reference afterwards. We check for Appendable before
// chunk reference afterwards. We check for Appendable from the appender before
// appendPreprocessor because in case it ends up creating a new chunk,
// we need to know if there was also a counter reset or not to set the
// meta properly.
app, _ := s.app.(*chunkenc.HistogramAppender)
var (
positiveInterjections, negativeInterjections []chunkenc.Interjection
pBackwardInter, nBackwardInter []chunkenc.Interjection
pMergedSpans, nMergedSpans []histogram.Span
okToAppend, counterReset bool
)
c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange)
if !sampleInOrder {
return sampleInOrder, chunkCreated
}
gauge := h.CounterResetHint == histogram.GaugeType
if app != nil {
positiveInterjections, negativeInterjections, okToAppend, counterReset = app.Appendable(h)
if gauge {
positiveInterjections, negativeInterjections, pBackwardInter, nBackwardInter, pMergedSpans, nMergedSpans, okToAppend = app.AppendableGauge(h)
} else {
positiveInterjections, negativeInterjections, okToAppend, counterReset = app.Appendable(h)
}
}
if !chunkCreated {
if len(pBackwardInter)+len(nBackwardInter) > 0 {
h.PositiveSpans = pMergedSpans
h.NegativeSpans = nMergedSpans
app.RecodeHistogramm(h, pBackwardInter, nBackwardInter)
}
// We have 3 cases here:
// - !okToAppend -> We need to cut a new chunk.
// - okToAppend but we have interjections -> Existing chunk needs
@ -1190,9 +1205,12 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui
if chunkCreated {
hc := s.headChunk.chunk.(*chunkenc.HistogramChunk)
header := chunkenc.UnknownCounterReset
if counterReset {
switch {
case gauge:
header = chunkenc.GaugeType
case counterReset:
header = chunkenc.CounterReset
} else if okToAppend {
case okToAppend:
header = chunkenc.NotCounterReset
}
hc.SetCounterResetHeader(header)
@ -1216,24 +1234,37 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui
// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, appendID uint64, chunkDiskMapper chunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) {
// Head controls the execution of recoding, so that we own the proper
// chunk reference afterwards. We check for Appendable before
// chunk reference afterwards. We check for Appendable from the appender before
// appendPreprocessor because in case it ends up creating a new chunk,
// we need to know if there was also a counter reset or not to set the
// meta properly.
app, _ := s.app.(*chunkenc.FloatHistogramAppender)
var (
positiveInterjections, negativeInterjections []chunkenc.Interjection
pBackwardInter, nBackwardInter []chunkenc.Interjection
pMergedSpans, nMergedSpans []histogram.Span
okToAppend, counterReset bool
)
c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange)
if !sampleInOrder {
return sampleInOrder, chunkCreated
}
gauge := fh.CounterResetHint == histogram.GaugeType
if app != nil {
positiveInterjections, negativeInterjections, okToAppend, counterReset = app.Appendable(fh)
if gauge {
positiveInterjections, negativeInterjections, pBackwardInter, nBackwardInter,
pMergedSpans, nMergedSpans, okToAppend = app.AppendableGauge(fh)
} else {
positiveInterjections, negativeInterjections, okToAppend, counterReset = app.Appendable(fh)
}
}
if !chunkCreated {
if len(pBackwardInter)+len(nBackwardInter) > 0 {
fh.PositiveSpans = pMergedSpans
fh.NegativeSpans = nMergedSpans
app.RecodeHistogramm(fh, pBackwardInter, nBackwardInter)
}
// We have 3 cases here:
// - !okToAppend -> We need to cut a new chunk.
// - okToAppend but we have interjections -> Existing chunk needs
@ -1258,9 +1289,12 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram,
if chunkCreated {
hc := s.headChunk.chunk.(*chunkenc.FloatHistogramChunk)
header := chunkenc.UnknownCounterReset
if counterReset {
switch {
case gauge:
header = chunkenc.GaugeType
case counterReset:
header = chunkenc.CounterReset
} else if okToAppend {
case okToAppend:
header = chunkenc.NotCounterReset
}
hc.SetCounterResetHeader(header)
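
The case ordering in these two identical switches is load-bearing: a gauge histogram takes precedence over counter-reset detection, which in turn beats a plain appendable sample. Restated as a pure function for clarity (illustrative only; `headerFor` does not exist in the codebase, and the `chunkenc` package is assumed imported):

```go
// headerFor restates the switch above: gauge wins, then an explicit counter
// reset, then a clean append; anything else stays unknown. Illustrative only.
func headerFor(gauge, counterReset, okToAppend bool) chunkenc.CounterResetHeader {
	switch {
	case gauge:
		return chunkenc.GaugeType
	case counterReset:
		return chunkenc.CounterReset
	case okToAppend:
		return chunkenc.NotCounterReset
	default:
		return chunkenc.UnknownCounterReset
	}
}
```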
@ -1415,33 +1449,35 @@ func (s *memSeries) cutNewHeadChunk(
return s.headChunk
}
// cutNewOOOHeadChunk cuts a new OOO chunk and m-maps the old chunk.
// The caller must ensure that s.ooo is not nil.
func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper chunkDiskMapper) (*oooHeadChunk, chunks.ChunkDiskMapperRef) {
ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper)
s.oooHeadChunk = &oooHeadChunk{
s.ooo.oooHeadChunk = &oooHeadChunk{
chunk: NewOOOChunk(),
minTime: mint,
maxTime: math.MinInt64,
}
return s.oooHeadChunk, ref
return s.ooo.oooHeadChunk, ref
}
func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper chunkDiskMapper) chunks.ChunkDiskMapperRef {
if s.oooHeadChunk == nil {
if s.ooo == nil || s.ooo.oooHeadChunk == nil {
// There is no head chunk, so nothing to m-map here.
return 0
}
xor, _ := s.oooHeadChunk.chunk.ToXOR() // Encode to XorChunk which is more compact and implements all of the needed functionality.
xor, _ := s.ooo.oooHeadChunk.chunk.ToXOR() // Encode to XorChunk which is more compact and implements all of the needed functionality.
oooXor := &chunkenc.OOOXORChunk{XORChunk: xor}
chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.oooHeadChunk.minTime, s.oooHeadChunk.maxTime, oooXor, handleChunkWriteError)
s.oooMmappedChunks = append(s.oooMmappedChunks, &mmappedChunk{
chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, oooXor, handleChunkWriteError)
s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{
ref: chunkRef,
numSamples: uint16(xor.NumSamples()),
minTime: s.oooHeadChunk.minTime,
maxTime: s.oooHeadChunk.maxTime,
minTime: s.ooo.oooHeadChunk.minTime,
maxTime: s.ooo.oooHeadChunk.maxTime,
})
s.oooHeadChunk = nil
s.ooo.oooHeadChunk = nil
return chunkRef
}
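
Several hunks in this file now gate on `s.ooo == nil`. The underlying pattern, lazy allocation so that in-order-only series pay no memory for OOO state, can be sketched in isolation (the type names below are illustrative stand-ins, not the real Prometheus types):

```go
package main

import "fmt"

// oooFields and series are illustrative stand-ins for memSeriesOOOFields and
// memSeries; a nil ooo pointer means the series has never seen OOO samples.
type oooFields struct{ headChunkSamples int }

type series struct{ ooo *oooFields }

// insertOOO allocates the OOO state only on the first out-of-order sample,
// mirroring the nil check added to memSeries.insert above.
func (s *series) insertOOO() {
	if s.ooo == nil {
		s.ooo = &oooFields{}
	}
	s.ooo.headChunkSamples++
}

func main() {
	s := &series{}
	fmt.Println(s.ooo == nil) // true: no OOO cost for in-order-only series
	s.insertOOO()
	fmt.Println(s.ooo.headChunkSamples) // 1
}
```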

View file

@ -225,8 +225,9 @@ func (s *memSeries) headChunkID(pos int) chunks.HeadChunkID {
// oooHeadChunkID returns the HeadChunkID referred to by the given position.
// * 0 <= pos < len(s.oooMmappedChunks) refer to s.oooMmappedChunks[pos]
// * pos == len(s.oooMmappedChunks) refers to s.oooHeadChunk
// The caller must ensure that s.ooo is not nil.
func (s *memSeries) oooHeadChunkID(pos int) chunks.HeadChunkID {
return chunks.HeadChunkID(pos) + s.firstOOOChunkID
return chunks.HeadChunkID(pos) + s.ooo.firstOOOChunkID
}
// LabelValueFor returns label value for the given label name in the series referred to by ID.
@ -378,6 +379,7 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, cdm chunkDiskMapper, memChunkPo
// might be a merge of all the overlapping chunks, if any, amongst all the
// chunks in the OOOHead.
// This function is not thread safe unless the caller holds a lock.
// The caller must ensure that s.ooo is not nil.
func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm chunkDiskMapper, mint, maxt int64) (chunk *mergedOOOChunks, err error) {
_, cid := chunks.HeadChunkRef(meta.Ref).Unpack()
@ -385,23 +387,23 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm chunkDiskMapper, mint,
// incremented by 1 when new chunk is created, hence (meta - firstChunkID) gives the slice index.
// The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix
// is len(s.mmappedChunks), it represents the next chunk, which is the head chunk.
ix := int(cid) - int(s.firstOOOChunkID)
if ix < 0 || ix > len(s.oooMmappedChunks) {
ix := int(cid) - int(s.ooo.firstOOOChunkID)
if ix < 0 || ix > len(s.ooo.oooMmappedChunks) {
return nil, storage.ErrNotFound
}
if ix == len(s.oooMmappedChunks) {
if s.oooHeadChunk == nil {
if ix == len(s.ooo.oooMmappedChunks) {
if s.ooo.oooHeadChunk == nil {
return nil, errors.New("invalid ooo head chunk")
}
}
// We create a temporary slice of chunk metas to hold the information of all
// possible chunks that may overlap with the requested chunk.
tmpChks := make([]chunkMetaAndChunkDiskMapperRef, 0, len(s.oooMmappedChunks))
tmpChks := make([]chunkMetaAndChunkDiskMapperRef, 0, len(s.ooo.oooMmappedChunks))
oooHeadRef := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.oooMmappedChunks))))
if s.oooHeadChunk != nil && s.oooHeadChunk.OverlapsClosedInterval(mint, maxt) {
oooHeadRef := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks))))
if s.ooo.oooHeadChunk != nil && s.ooo.oooHeadChunk.OverlapsClosedInterval(mint, maxt) {
// We only want to append the head chunk if this chunk existed when
// Series() was called. This brings consistency in case new data
// is added in between Series() and Chunk() calls.
@ -417,7 +419,7 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm chunkDiskMapper, mint,
}
}
for i, c := range s.oooMmappedChunks {
for i, c := range s.ooo.oooMmappedChunks {
chunkRef := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i)))
// We can skip chunks that came in later than the last known OOOLastRef.
if chunkRef > meta.OOOLastRef {
@ -462,11 +464,11 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm chunkDiskMapper, mint,
// If head chunk min and max time match the meta OOO markers
// that means that the chunk has not expanded so we can append
// it as it is.
if s.oooHeadChunk.minTime == meta.OOOLastMinTime && s.oooHeadChunk.maxTime == meta.OOOLastMaxTime {
xor, err = s.oooHeadChunk.chunk.ToXOR() // TODO(jesus.vazquez) (This is an optimization idea that has no priority and might not be that useful) See if we could use a copy of the underlying slice. That would leave the more expensive ToXOR() function only for the usecase where Bytes() is called.
if s.ooo.oooHeadChunk.minTime == meta.OOOLastMinTime && s.ooo.oooHeadChunk.maxTime == meta.OOOLastMaxTime {
xor, err = s.ooo.oooHeadChunk.chunk.ToXOR() // TODO(jesus.vazquez) (This is an optimization idea that has no priority and might not be that useful) See if we could use a copy of the underlying slice. That would leave the more expensive ToXOR() function only for the usecase where Bytes() is called.
} else {
// We need to remove samples that are outside of the markers
xor, err = s.oooHeadChunk.chunk.ToXORBetweenTimestamps(meta.OOOLastMinTime, meta.OOOLastMaxTime)
xor, err = s.ooo.oooHeadChunk.chunk.ToXORBetweenTimestamps(meta.OOOLastMinTime, meta.OOOLastMaxTime)
}
if err != nil {
return nil, errors.Wrap(err, "failed to convert ooo head chunk to xor chunk")

View file

@ -110,7 +110,9 @@ func populateTestWAL(t testing.TB, w *wlog.WL, recs []interface{}) {
func readTestWAL(t testing.TB, dir string) (recs []interface{}) {
sr, err := wlog.NewSegmentsReader(dir)
require.NoError(t, err)
defer sr.Close()
defer func() {
require.NoError(t, sr.Close())
}()
var dec record.Decoder
r := wlog.NewReader(sr)
@ -127,6 +129,14 @@ func readTestWAL(t testing.TB, dir string) (recs []interface{}) {
samples, err := dec.Samples(rec, nil)
require.NoError(t, err)
recs = append(recs, samples)
case record.HistogramSamples:
samples, err := dec.HistogramSamples(rec, nil)
require.NoError(t, err)
recs = append(recs, samples)
case record.FloatHistogramSamples:
samples, err := dec.FloatHistogramSamples(rec, nil)
require.NoError(t, err)
recs = append(recs, samples)
case record.Tombstones:
tstones, err := dec.Tombstones(rec, nil)
require.NoError(t, err)
@ -2898,7 +2908,9 @@ func TestAppendHistogram(t *testing.T) {
t int64
h *histogram.Histogram
}
expHistograms := make([]timedHistogram, 0, numHistograms)
expHistograms := make([]timedHistogram, 0, 2*numHistograms)
// Counter integer histograms.
for _, h := range GenerateTestHistograms(numHistograms) {
_, err := app.AppendHistogram(0, l, ingestTs, h, nil)
require.NoError(t, err)
@ -2910,11 +2922,25 @@ func TestAppendHistogram(t *testing.T) {
}
}
// Gauge integer histograms.
for _, h := range GenerateTestGaugeHistograms(numHistograms) {
_, err := app.AppendHistogram(0, l, ingestTs, h, nil)
require.NoError(t, err)
expHistograms = append(expHistograms, timedHistogram{ingestTs, h})
ingestTs++
if ingestTs%50 == 0 {
require.NoError(t, app.Commit())
app = head.Appender(context.Background())
}
}
type timedFloatHistogram struct {
t int64
h *histogram.FloatHistogram
}
expFloatHistograms := make([]timedFloatHistogram, 0, numHistograms)
expFloatHistograms := make([]timedFloatHistogram, 0, 2*numHistograms)
// Counter float histograms.
for _, fh := range GenerateTestFloatHistograms(numHistograms) {
_, err := app.AppendHistogram(0, l, ingestTs, nil, fh)
require.NoError(t, err)
@ -2925,6 +2951,19 @@ func TestAppendHistogram(t *testing.T) {
app = head.Appender(context.Background())
}
}
// Gauge float histograms.
for _, fh := range GenerateTestGaugeFloatHistograms(numHistograms) {
_, err := app.AppendHistogram(0, l, ingestTs, nil, fh)
require.NoError(t, err)
expFloatHistograms = append(expFloatHistograms, timedFloatHistogram{ingestTs, fh})
ingestTs++
if ingestTs%50 == 0 {
require.NoError(t, app.Commit())
app = head.Appender(context.Background())
}
}
require.NoError(t, app.Commit())
q, err := NewBlockQuerier(head, head.MinTime(), head.MaxTime())
@ -2959,7 +2998,7 @@ func TestAppendHistogram(t *testing.T) {
}
func TestHistogramInWALAndMmapChunk(t *testing.T) {
head, _ := newTestHead(t, 2000, false, false)
head, _ := newTestHead(t, 3000, false, false)
t.Cleanup(func() {
require.NoError(t, head.Close())
})
@ -2968,44 +3007,61 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
// Series with only histograms.
s1 := labels.FromStrings("a", "b1")
k1 := s1.String()
numHistograms := 450
numHistograms := 300
exp := map[string][]tsdbutil.Sample{}
app := head.Appender(context.Background())
ts := int64(0)
for _, h := range GenerateTestHistograms(numHistograms) {
h.Count = h.Count * 2
h.NegativeSpans = h.PositiveSpans
h.NegativeBuckets = h.PositiveBuckets
_, err := app.AppendHistogram(0, s1, ts, h, nil)
require.NoError(t, err)
exp[k1] = append(exp[k1], sample{t: ts, h: h.Copy()})
ts++
if ts%5 == 0 {
require.NoError(t, app.Commit())
app = head.Appender(context.Background())
var app storage.Appender
for _, gauge := range []bool{true, false} {
app = head.Appender(context.Background())
var hists []*histogram.Histogram
if gauge {
hists = GenerateTestGaugeHistograms(numHistograms)
} else {
hists = GenerateTestHistograms(numHistograms)
}
}
require.NoError(t, app.Commit())
app = head.Appender(context.Background())
for _, h := range GenerateTestFloatHistograms(numHistograms) {
h.Count = h.Count * 2
h.NegativeSpans = h.PositiveSpans
h.NegativeBuckets = h.PositiveBuckets
_, err := app.AppendHistogram(0, s1, ts, nil, h)
require.NoError(t, err)
exp[k1] = append(exp[k1], sample{t: ts, fh: h.Copy()})
ts++
if ts%5 == 0 {
require.NoError(t, app.Commit())
app = head.Appender(context.Background())
for _, h := range hists {
h.Count = h.Count * 2
h.NegativeSpans = h.PositiveSpans
h.NegativeBuckets = h.PositiveBuckets
_, err := app.AppendHistogram(0, s1, ts, h, nil)
require.NoError(t, err)
exp[k1] = append(exp[k1], sample{t: ts, h: h.Copy()})
ts++
if ts%5 == 0 {
require.NoError(t, app.Commit())
app = head.Appender(context.Background())
}
}
require.NoError(t, app.Commit())
}
for _, gauge := range []bool{true, false} {
app = head.Appender(context.Background())
var hists []*histogram.FloatHistogram
if gauge {
hists = GenerateTestGaugeFloatHistograms(numHistograms)
} else {
hists = GenerateTestFloatHistograms(numHistograms)
}
for _, h := range hists {
h.Count = h.Count * 2
h.NegativeSpans = h.PositiveSpans
h.NegativeBuckets = h.PositiveBuckets
_, err := app.AppendHistogram(0, s1, ts, nil, h)
require.NoError(t, err)
exp[k1] = append(exp[k1], sample{t: ts, fh: h.Copy()})
ts++
if ts%5 == 0 {
require.NoError(t, app.Commit())
app = head.Appender(context.Background())
}
}
require.NoError(t, app.Commit())
}
require.NoError(t, app.Commit())
// There should be 7 mmap chunks in s1.
// There should be 11 mmap chunks in s1.
ms := head.series.getByHash(s1.Hash(), s1)
require.Len(t, ms.mmappedChunks, 7)
expMmapChunks := make([]*mmappedChunk, 0, 7)
require.Len(t, ms.mmappedChunks, 11)
expMmapChunks := make([]*mmappedChunk, 0, 11)
for _, mmap := range ms.mmappedChunks {
require.Greater(t, mmap.numSamples, uint16(0))
cpy := *mmap
@ -3017,76 +3073,101 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
// Series with mix of histograms and float.
s2 := labels.FromStrings("a", "b2")
k2 := s2.String()
app = head.Appender(context.Background())
ts = 0
for _, h := range GenerateTestHistograms(100) {
ts++
h.Count = h.Count * 2
h.NegativeSpans = h.PositiveSpans
h.NegativeBuckets = h.PositiveBuckets
_, err := app.AppendHistogram(0, s2, int64(ts), h, nil)
require.NoError(t, err)
exp[k2] = append(exp[k2], sample{t: int64(ts), h: h.Copy()})
if ts%20 == 0 {
require.NoError(t, app.Commit())
app = head.Appender(context.Background())
// Add some float.
for i := 0; i < 10; i++ {
ts++
_, err := app.Append(0, s2, int64(ts), float64(ts))
require.NoError(t, err)
exp[k2] = append(exp[k2], sample{t: int64(ts), v: float64(ts)})
}
require.NoError(t, app.Commit())
app = head.Appender(context.Background())
for _, gauge := range []bool{true, false} {
app = head.Appender(context.Background())
var hists []*histogram.Histogram
if gauge {
hists = GenerateTestGaugeHistograms(100)
} else {
hists = GenerateTestHistograms(100)
}
}
require.NoError(t, app.Commit())
app = head.Appender(context.Background())
for _, h := range GenerateTestFloatHistograms(100) {
ts++
h.Count = h.Count * 2
h.NegativeSpans = h.PositiveSpans
h.NegativeBuckets = h.PositiveBuckets
_, err := app.AppendHistogram(0, s2, int64(ts), nil, h)
require.NoError(t, err)
exp[k2] = append(exp[k2], sample{t: int64(ts), fh: h.Copy()})
if ts%20 == 0 {
require.NoError(t, app.Commit())
app = head.Appender(context.Background())
// Add some float.
for i := 0; i < 10; i++ {
ts++
_, err := app.Append(0, s2, int64(ts), float64(ts))
require.NoError(t, err)
exp[k2] = append(exp[k2], sample{t: int64(ts), v: float64(ts)})
for _, h := range hists {
ts++
h.Count = h.Count * 2
h.NegativeSpans = h.PositiveSpans
h.NegativeBuckets = h.PositiveBuckets
_, err := app.AppendHistogram(0, s2, int64(ts), h, nil)
require.NoError(t, err)
exp[k2] = append(exp[k2], sample{t: int64(ts), h: h.Copy()})
if ts%20 == 0 {
require.NoError(t, app.Commit())
app = head.Appender(context.Background())
// Add some float.
for i := 0; i < 10; i++ {
ts++
_, err := app.Append(0, s2, int64(ts), float64(ts))
require.NoError(t, err)
exp[k2] = append(exp[k2], sample{t: int64(ts), v: float64(ts)})
}
require.NoError(t, app.Commit())
app = head.Appender(context.Background())
}
require.NoError(t, app.Commit())
app = head.Appender(context.Background())
}
require.NoError(t, app.Commit())
}
for _, gauge := range []bool{true, false} {
app = head.Appender(context.Background())
var hists []*histogram.FloatHistogram
if gauge {
hists = GenerateTestGaugeFloatHistograms(100)
} else {
hists = GenerateTestFloatHistograms(100)
}
for _, h := range hists {
ts++
h.Count = h.Count * 2
h.NegativeSpans = h.PositiveSpans
h.NegativeBuckets = h.PositiveBuckets
_, err := app.AppendHistogram(0, s2, int64(ts), nil, h)
require.NoError(t, err)
exp[k2] = append(exp[k2], sample{t: int64(ts), fh: h.Copy()})
if ts%20 == 0 {
require.NoError(t, app.Commit())
app = head.Appender(context.Background())
// Add some float.
for i := 0; i < 10; i++ {
ts++
_, err := app.Append(0, s2, int64(ts), float64(ts))
require.NoError(t, err)
exp[k2] = append(exp[k2], sample{t: int64(ts), v: float64(ts)})
}
require.NoError(t, app.Commit())
app = head.Appender(context.Background())
}
}
require.NoError(t, app.Commit())
}
require.NoError(t, app.Commit())
// Restart head.
require.NoError(t, head.Close())
w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false)
require.NoError(t, err)
head, err = NewHead(nil, nil, w, nil, head.opts, nil)
require.NoError(t, err)
require.NoError(t, head.Init(0))
startHead := func() {
w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false)
require.NoError(t, err)
head, err = NewHead(nil, nil, w, nil, head.opts, nil)
require.NoError(t, err)
require.NoError(t, head.Init(0))
}
startHead()
// Checking contents of s1.
ms = head.series.getByHash(s1.Hash(), s1)
require.Equal(t, expMmapChunks, ms.mmappedChunks)
for _, mmap := range ms.mmappedChunks {
require.Greater(t, mmap.numSamples, uint16(0))
}
require.Equal(t, expHeadChunkSamples, ms.headChunk.chunk.NumSamples())
q, err := NewBlockQuerier(head, head.MinTime(), head.MaxTime())
require.NoError(t, err)
act := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "a", "b.*"))
require.Equal(t, exp, act)
testQuery := func() {
q, err := NewBlockQuerier(head, head.MinTime(), head.MaxTime())
require.NoError(t, err)
act := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "a", "b.*"))
require.Equal(t, exp, act)
}
testQuery()
// Restart with no mmap chunks to test WAL replay.
require.NoError(t, head.Close())
require.NoError(t, os.RemoveAll(mmappedChunksDir(head.opts.ChunkDirRoot)))
startHead()
testQuery()
}
func TestChunkSnapshot(t *testing.T) {
@ -3592,7 +3673,7 @@ func TestHistogramCounterResetHeader(t *testing.T) {
if floatHisto {
_, err = app.AppendHistogram(0, l, ts, nil, h.ToFloat())
} else {
_, err = app.AppendHistogram(0, l, ts, h, nil)
_, err = app.AppendHistogram(0, l, ts, h.Copy(), nil)
}
require.NoError(t, err)
require.NoError(t, app.Commit())
@ -3623,10 +3704,6 @@ func TestHistogramCounterResetHeader(t *testing.T) {
}
h := GenerateTestHistograms(1)[0]
if len(h.NegativeBuckets) == 0 {
h.NegativeSpans = append([]histogram.Span{}, h.PositiveSpans...)
h.NegativeBuckets = append([]int64{}, h.PositiveBuckets...)
}
h.PositiveBuckets = []int64{100, 1, 1, 1}
h.NegativeBuckets = []int64{100, 1, 1, 1}
h.Count = 1000
@ -4001,7 +4078,7 @@ func TestOOOWalReplay(t *testing.T) {
require.False(t, ok)
require.NotNil(t, ms)
xor, err := ms.oooHeadChunk.chunk.ToXOR()
xor, err := ms.ooo.oooHeadChunk.chunk.ToXOR()
require.NoError(t, err)
it := xor.Iterator(nil)
@ -4061,16 +4138,16 @@ func TestOOOMmapReplay(t *testing.T) {
require.False(t, ok)
require.NotNil(t, ms)
require.Len(t, ms.oooMmappedChunks, 3)
require.Len(t, ms.ooo.oooMmappedChunks, 3)
// Verify that we can access the chunks without error.
for _, m := range ms.oooMmappedChunks {
for _, m := range ms.ooo.oooMmappedChunks {
chk, err := h.chunkDiskMapper.Chunk(m.ref)
require.NoError(t, err)
require.Equal(t, int(m.numSamples), chk.NumSamples())
}
expMmapChunks := make([]*mmappedChunk, 3)
copy(expMmapChunks, ms.oooMmappedChunks)
copy(expMmapChunks, ms.ooo.oooMmappedChunks)
// Restart head.
require.NoError(t, h.Close())
@ -4089,16 +4166,16 @@ func TestOOOMmapReplay(t *testing.T) {
require.False(t, ok)
require.NotNil(t, ms)
require.Len(t, ms.oooMmappedChunks, len(expMmapChunks))
require.Len(t, ms.ooo.oooMmappedChunks, len(expMmapChunks))
// Verify that we can access the chunks without error.
for _, m := range ms.oooMmappedChunks {
for _, m := range ms.ooo.oooMmappedChunks {
chk, err := h.chunkDiskMapper.Chunk(m.ref)
require.NoError(t, err)
require.Equal(t, int(m.numSamples), chk.NumSamples())
}
actMmapChunks := make([]*mmappedChunk, len(expMmapChunks))
copy(actMmapChunks, ms.oooMmappedChunks)
copy(actMmapChunks, ms.ooo.oooMmappedChunks)
require.Equal(t, expMmapChunks, actMmapChunks)
@ -4493,8 +4570,8 @@ func TestOOOAppendWithNoSeries(t *testing.T) {
require.NotNil(t, ms)
require.Nil(t, ms.headChunk)
require.NotNil(t, ms.oooHeadChunk)
require.Equal(t, expSamples, ms.oooHeadChunk.chunk.NumSamples())
require.NotNil(t, ms.ooo.oooHeadChunk)
require.Equal(t, expSamples, ms.ooo.oooHeadChunk.chunk.NumSamples())
}
verifyInOrderSamples := func(lbls labels.Labels, expSamples int) {
@ -4503,7 +4580,7 @@ func TestOOOAppendWithNoSeries(t *testing.T) {
require.False(t, created)
require.NotNil(t, ms)
require.Nil(t, ms.oooHeadChunk)
require.Nil(t, ms.ooo)
require.NotNil(t, ms.headChunk)
require.Equal(t, expSamples, ms.headChunk.chunk.NumSamples())
}
@ -4587,3 +4664,153 @@ func TestHeadMinOOOTimeUpdate(t *testing.T) {
require.NoError(t, h.truncateOOO(0, 2))
require.Equal(t, 295*time.Minute.Milliseconds(), h.MinOOOTime())
}
func TestGaugeHistogramWALAndChunkHeader(t *testing.T) {
l := labels.FromStrings("a", "b")
head, _ := newTestHead(t, 1000, false, false)
t.Cleanup(func() {
require.NoError(t, head.Close())
})
require.NoError(t, head.Init(0))
ts := int64(0)
appendHistogram := func(h *histogram.Histogram) {
ts++
app := head.Appender(context.Background())
_, err := app.AppendHistogram(0, l, ts, h.Copy(), nil)
require.NoError(t, err)
require.NoError(t, app.Commit())
}
hists := GenerateTestGaugeHistograms(5)
hists[0].CounterResetHint = histogram.UnknownCounterReset
appendHistogram(hists[0])
appendHistogram(hists[1])
appendHistogram(hists[2])
hists[3].CounterResetHint = histogram.UnknownCounterReset
appendHistogram(hists[3])
appendHistogram(hists[3])
appendHistogram(hists[4])
checkHeaders := func() {
ms, _, err := head.getOrCreate(l.Hash(), l)
require.NoError(t, err)
require.Len(t, ms.mmappedChunks, 3)
expHeaders := []chunkenc.CounterResetHeader{
chunkenc.UnknownCounterReset,
chunkenc.GaugeType,
chunkenc.UnknownCounterReset,
chunkenc.GaugeType,
}
for i, mmapChunk := range ms.mmappedChunks {
chk, err := head.chunkDiskMapper.Chunk(mmapChunk.ref)
require.NoError(t, err)
require.Equal(t, expHeaders[i], chk.(*chunkenc.HistogramChunk).GetCounterResetHeader())
}
require.Equal(t, expHeaders[len(expHeaders)-1], ms.headChunk.chunk.(*chunkenc.HistogramChunk).GetCounterResetHeader())
}
checkHeaders()
recs := readTestWAL(t, head.wal.Dir())
require.Equal(t, []interface{}{
[]record.RefSeries{
{
Ref: 1,
Labels: labels.FromStrings("a", "b"),
},
},
[]record.RefHistogramSample{{Ref: 1, T: 1, H: hists[0]}},
[]record.RefHistogramSample{{Ref: 1, T: 2, H: hists[1]}},
[]record.RefHistogramSample{{Ref: 1, T: 3, H: hists[2]}},
[]record.RefHistogramSample{{Ref: 1, T: 4, H: hists[3]}},
[]record.RefHistogramSample{{Ref: 1, T: 5, H: hists[3]}},
[]record.RefHistogramSample{{Ref: 1, T: 6, H: hists[4]}},
}, recs)
// Restart Head without mmap chunks so that WAL replay must recognize gauge histograms.
require.NoError(t, head.Close())
require.NoError(t, os.RemoveAll(mmappedChunksDir(head.opts.ChunkDirRoot)))
w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false)
require.NoError(t, err)
head, err = NewHead(nil, nil, w, nil, head.opts, nil)
require.NoError(t, err)
require.NoError(t, head.Init(0))
checkHeaders()
}
func TestGaugeFloatHistogramWALAndChunkHeader(t *testing.T) {
l := labels.FromStrings("a", "b")
head, _ := newTestHead(t, 1000, false, false)
t.Cleanup(func() {
require.NoError(t, head.Close())
})
require.NoError(t, head.Init(0))
ts := int64(0)
appendHistogram := func(h *histogram.FloatHistogram) {
ts++
app := head.Appender(context.Background())
_, err := app.AppendHistogram(0, l, ts, nil, h.Copy())
require.NoError(t, err)
require.NoError(t, app.Commit())
}
hists := GenerateTestGaugeFloatHistograms(5)
hists[0].CounterResetHint = histogram.UnknownCounterReset
appendHistogram(hists[0])
appendHistogram(hists[1])
appendHistogram(hists[2])
hists[3].CounterResetHint = histogram.UnknownCounterReset
appendHistogram(hists[3])
appendHistogram(hists[3])
appendHistogram(hists[4])
checkHeaders := func() {
ms, _, err := head.getOrCreate(l.Hash(), l)
require.NoError(t, err)
require.Len(t, ms.mmappedChunks, 3)
expHeaders := []chunkenc.CounterResetHeader{
chunkenc.UnknownCounterReset,
chunkenc.GaugeType,
chunkenc.UnknownCounterReset,
chunkenc.GaugeType,
}
for i, mmapChunk := range ms.mmappedChunks {
chk, err := head.chunkDiskMapper.Chunk(mmapChunk.ref)
require.NoError(t, err)
require.Equal(t, expHeaders[i], chk.(*chunkenc.FloatHistogramChunk).GetCounterResetHeader())
}
require.Equal(t, expHeaders[len(expHeaders)-1], ms.headChunk.chunk.(*chunkenc.FloatHistogramChunk).GetCounterResetHeader())
}
checkHeaders()
recs := readTestWAL(t, head.wal.Dir())
require.Equal(t, []interface{}{
[]record.RefSeries{
{
Ref: 1,
Labels: labels.FromStrings("a", "b"),
},
},
[]record.RefFloatHistogramSample{{Ref: 1, T: 1, FH: hists[0]}},
[]record.RefFloatHistogramSample{{Ref: 1, T: 2, FH: hists[1]}},
[]record.RefFloatHistogramSample{{Ref: 1, T: 3, FH: hists[2]}},
[]record.RefFloatHistogramSample{{Ref: 1, T: 4, FH: hists[3]}},
[]record.RefFloatHistogramSample{{Ref: 1, T: 5, FH: hists[3]}},
[]record.RefFloatHistogramSample{{Ref: 1, T: 6, FH: hists[4]}},
}, recs)
// Restart Head without mmap chunks so that WAL replay must recognize gauge histograms.
require.NoError(t, head.Close())
require.NoError(t, os.RemoveAll(mmappedChunksDir(head.opts.ChunkDirRoot)))
w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false)
require.NoError(t, err)
head, err = NewHead(nil, nil, w, nil, head.opts, nil)
require.NoError(t, err)
require.NoError(t, head.Init(0))
checkHeaders()
}

View file

@ -496,10 +496,18 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m
}
h.metrics.chunksCreated.Add(float64(len(mmc) + len(oooMmc)))
h.metrics.chunksRemoved.Add(float64(len(mSeries.mmappedChunks) + len(mSeries.oooMmappedChunks)))
h.metrics.chunks.Add(float64(len(mmc) + len(oooMmc) - len(mSeries.mmappedChunks) - len(mSeries.oooMmappedChunks)))
h.metrics.chunksRemoved.Add(float64(len(mSeries.mmappedChunks)))
h.metrics.chunks.Add(float64(len(mmc) + len(oooMmc) - len(mSeries.mmappedChunks)))
mSeries.mmappedChunks = mmc
mSeries.oooMmappedChunks = oooMmc
mSeries.ooo = nil
if len(oooMmc) == 0 {
mSeries.ooo = nil
} else {
if mSeries.ooo == nil {
mSeries.ooo = &memSeriesOOOFields{}
}
*mSeries.ooo = memSeriesOOOFields{oooMmappedChunks: oooMmc}
}
// Cache the last mmapped chunk time, so we can skip calling append() for samples it will reject.
if len(mmc) == 0 {
mSeries.mmMaxTime = math.MinInt64
@ -818,7 +826,9 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
// chunk size parameters, we are not taking care of that here.
// TODO(codesome): see if there is a way to avoid duplicate m-map chunks if
// the size of ooo chunk was reduced between restart.
ms.oooHeadChunk = nil
if ms.ooo != nil {
ms.ooo.oooHeadChunk = nil
}
processors[idx].mx.Unlock()
}

View file

@ -1662,6 +1662,7 @@ func (r *Reader) Postings(name string, values ...string) (Postings, error) {
return EmptyPostings(), nil
}
slices.Sort(values) // Values must be in order so we can step through the table on disk.
res := make([]Postings, 0, len(values))
skip := 0
valueIndex := 0
@ -1906,7 +1907,7 @@ func (dec *Decoder) LabelValueFor(b []byte, label string) (string, error) {
}
// Series decodes a series entry from the given byte slice into builder and chks.
// Previous contents of lbls can be overwritten - make sure you copy before retaining.
// Previous contents of builder can be overwritten - make sure you copy before retaining.
func (dec *Decoder) Series(b []byte, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
builder.Reset()
if chks != nil {

View file

@ -36,6 +36,15 @@ func NewOOOChunk() *OOOChunk {
// Insert inserts the sample such that order is maintained.
// Returns false if insert was not possible due to the same timestamp already existing.
func (o *OOOChunk) Insert(t int64, v float64) bool {
// Although out-of-order samples can be out-of-order amongst themselves, we
// are opinionated and expect them to be usually in order, so we first try to
// append at the end if the new timestamp is higher than the last known
// timestamp.
if len(o.samples) == 0 || t > o.samples[len(o.samples)-1].t {
o.samples = append(o.samples, sample{t, v, nil, nil})
return true
}
// Find index of sample we should replace.
i := sort.Search(len(o.samples), func(i int) bool { return o.samples[i].t >= t })
@ -45,6 +54,7 @@ func (o *OOOChunk) Insert(t int64, v float64) bool {
return true
}
// Duplicate sample for timestamp is not allowed.
if o.samples[i].t == t {
return false
}
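
A self-contained sketch of the same strategy, append-first fast path plus binary-search fallback, on bare int64 timestamps (simplified; the real chunk stores full samples):

```go
package main

import (
	"fmt"
	"sort"
)

// insertSorted mirrors OOOChunk.Insert above: append when the timestamp is
// newer than everything seen, otherwise binary-search for the slot,
// rejecting exact duplicates.
func insertSorted(ts []int64, t int64) ([]int64, bool) {
	if len(ts) == 0 || t > ts[len(ts)-1] {
		return append(ts, t), true // the mostly-in-order fast path
	}
	i := sort.Search(len(ts), func(i int) bool { return ts[i] >= t })
	if ts[i] == t {
		return ts, false // duplicate timestamp, not allowed
	}
	ts = append(ts, 0)
	copy(ts[i+1:], ts[i:]) // shift the tail right by one
	ts[i] = t
	return ts, true
}

func main() {
	var ts []int64
	for _, t := range []int64{10, 20, 15, 20} {
		var ok bool
		ts, ok = insertSorted(ts, t)
		fmt.Println(ts, ok) // [10] true, [10 20] true, [10 15 20] true, [10 15 20] false
	}
}
```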

View file

@ -71,7 +71,11 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
defer s.Unlock()
*chks = (*chks)[:0]
tmpChks := make([]chunks.Meta, 0, len(s.oooMmappedChunks))
if s.ooo == nil {
return nil
}
tmpChks := make([]chunks.Meta, 0, len(s.ooo.oooMmappedChunks))
// We define these markers to track the last chunk reference while we
// fill the chunk meta.
@ -103,15 +107,15 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
// Collect all chunks that overlap the query range, in order from most recent to most old,
// so we can set the correct markers.
if s.oooHeadChunk != nil {
c := s.oooHeadChunk
if s.ooo.oooHeadChunk != nil {
c := s.ooo.oooHeadChunk
if c.OverlapsClosedInterval(oh.mint, oh.maxt) && lastMmapRef == 0 {
ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.oooMmappedChunks))))
ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks))))
addChunk(c.minTime, c.maxTime, ref)
}
}
for i := len(s.oooMmappedChunks) - 1; i >= 0; i-- {
c := s.oooMmappedChunks[i]
for i := len(s.ooo.oooMmappedChunks) - 1; i >= 0; i-- {
c := s.ooo.oooMmappedChunks[i]
if c.OverlapsClosedInterval(oh.mint, oh.maxt) && (lastMmapRef == 0 || lastMmapRef.GreaterThanOrEqualTo(c.ref)) {
ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i)))
addChunk(c.minTime, c.maxTime, ref)
@ -238,6 +242,11 @@ func (cr OOOHeadChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) {
}
s.Lock()
if s.ooo == nil {
// There is no OOO data for this series.
s.Unlock()
return nil, storage.ErrNotFound
}
c, err := s.oooMergedChunk(meta, cr.head.chunkDiskMapper, cr.mint, cr.maxt)
s.Unlock()
if err != nil {
@ -308,18 +317,23 @@ func NewOOOCompactionHead(head *Head) (*OOOCompactionHead, error) {
// TODO: consider having a lock specifically for ooo data.
ms.Lock()
if ms.ooo == nil {
ms.Unlock()
continue
}
mmapRef := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper)
if mmapRef == 0 && len(ms.oooMmappedChunks) > 0 {
if mmapRef == 0 && len(ms.ooo.oooMmappedChunks) > 0 {
// Nothing was m-mapped. So take the mmapRef from the existing slice if it exists.
mmapRef = ms.oooMmappedChunks[len(ms.oooMmappedChunks)-1].ref
mmapRef = ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref
}
seq, off := mmapRef.Unpack()
if seq > lastSeq || (seq == lastSeq && off > lastOff) {
ch.lastMmapRef, lastSeq, lastOff = mmapRef, seq, off
}
if len(ms.oooMmappedChunks) > 0 {
if len(ms.ooo.oooMmappedChunks) > 0 {
ch.postings = append(ch.postings, seriesRef)
for _, c := range ms.oooMmappedChunks {
for _, c := range ms.ooo.oooMmappedChunks {
if c.minTime < ch.mint {
ch.mint = c.minTime
}

View file

@ -301,6 +301,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
require.NoError(t, h.Init(0))
s1, _, _ := h.getOrCreate(s1ID, s1Lset)
s1.ooo = &memSeriesOOOFields{}
var lastChunk chunkInterval
var lastChunkPos int
@ -340,7 +341,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
if headChunk && len(intervals) > 0 {
// Put the last interval in the head chunk
s1.oooHeadChunk = &oooHeadChunk{
s1.ooo.oooHeadChunk = &oooHeadChunk{
minTime: intervals[len(intervals)-1].mint,
maxTime: intervals[len(intervals)-1].maxt,
}
@ -348,7 +349,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
}
for _, ic := range intervals {
s1.oooMmappedChunks = append(s1.oooMmappedChunks, &mmappedChunk{
s1.ooo.oooMmappedChunks = append(s1.ooo.oooMmappedChunks, &mmappedChunk{
minTime: ic.mint,
maxTime: ic.maxt,
})

View file

@ -19,7 +19,6 @@ import (
"github.com/oklog/ulid"
"github.com/pkg/errors"
"golang.org/x/exp/slices"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
@ -189,7 +188,14 @@ func PostingsForMatchers(ix IndexPostingsReader, ms ...*labels.Matcher) (index.P
}
for _, m := range ms {
if labelMustBeSet[m.Name] {
if m.Name == "" && m.Value == "" { // Special-case for AllPostings, used in tests at least.
k, v := index.AllPostingsKey()
allPostings, err := ix.Postings(k, v)
if err != nil {
return nil, err
}
its = append(its, allPostings)
} else if labelMustBeSet[m.Name] {
// If this matcher must be non-empty, we can be smarter.
matchesEmpty := m.Matches("")
isNot := m.Type == labels.MatchNotEqual || m.Type == labels.MatchNotRegexp
@ -277,7 +283,6 @@ func postingsForMatcher(ix IndexPostingsReader, m *labels.Matcher) (index.Postin
if m.Type == labels.MatchRegexp {
setMatches := m.SetMatches()
if len(setMatches) > 0 {
slices.Sort(setMatches)
return ix.Postings(m.Name, setMatches...)
}
}
@ -288,14 +293,9 @@ func postingsForMatcher(ix IndexPostingsReader, m *labels.Matcher) (index.Postin
}
var res []string
lastVal, isSorted := "", true
for _, val := range vals {
if m.Matches(val) {
res = append(res, val)
if isSorted && val < lastVal {
isSorted = false
}
lastVal = val
}
}
@ -303,9 +303,6 @@ func postingsForMatcher(ix IndexPostingsReader, m *labels.Matcher) (index.Postin
return index.EmptyPostings(), nil
}
if !isSorted {
slices.Sort(res)
}
return ix.Postings(m.Name, res...)
}
@ -317,20 +314,17 @@ func inversePostingsForMatcher(ix IndexPostingsReader, m *labels.Matcher) (index
}
var res []string
lastVal, isSorted := "", true
for _, val := range vals {
if !m.Matches(val) {
res = append(res, val)
if isSorted && val < lastVal {
isSorted = false
// If the inverse match is ="", we just want all the values.
if m.Type == labels.MatchEqual && m.Value == "" {
res = vals
} else {
for _, val := range vals {
if !m.Matches(val) {
res = append(res, val)
}
lastVal = val
}
}
if !isSorted {
slices.Sort(res)
}
return ix.Postings(m.Name, res...)
}
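
The dropped `isSorted` bookkeeping and `slices.Sort` calls (and the removed `golang.org/x/exp/slices` import above) rely on the index returning label values already sorted: filtering a sorted slice preserves its order, so no re-sort is needed. A trivial demonstration of that invariant:

```go
package main

import (
	"fmt"
	"sort"
)

// Filtering a sorted slice keeps it sorted, which is the invariant the
// removed re-sorts relied on once label values arrive in order.
func main() {
	vals := []string{"bar", "baz", "foo", "qux"} // sorted, as the index returns them
	var res []string
	for _, v := range vals {
		if v != "baz" { // stand-in for m.Matches(v)
			res = append(res, v)
		}
	}
	fmt.Println(res, sort.StringsAreSorted(res)) // [bar foo qux] true
}
```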

View file

@ -93,7 +93,7 @@ func BenchmarkQuerier(b *testing.B) {
func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {
n1 := labels.MustNewMatcher(labels.MatchEqual, "n", "1"+postingsBenchSuffix)
nX := labels.MustNewMatcher(labels.MatchNotEqual, "n", "X"+postingsBenchSuffix)
nX := labels.MustNewMatcher(labels.MatchEqual, "n", "X"+postingsBenchSuffix)
jFoo := labels.MustNewMatcher(labels.MatchEqual, "j", "foo")
jNotFoo := labels.MustNewMatcher(labels.MatchNotEqual, "j", "foo")

View file

@ -441,6 +441,8 @@ func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample)
H: &histogram.Histogram{},
}
rh.H.CounterResetHint = histogram.CounterResetHint(dec.Byte())
rh.H.Schema = int32(dec.Varint64())
rh.H.ZeroThreshold = math.Float64frombits(dec.Be64())
@ -517,6 +519,8 @@ func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogr
FH: &histogram.FloatHistogram{},
}
rh.FH.CounterResetHint = histogram.CounterResetHint(dec.Byte())
rh.FH.Schema = int32(dec.Varint64())
rh.FH.ZeroThreshold = dec.Be64Float64()
@ -715,6 +719,8 @@ func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) []
buf.PutVarint64(int64(h.Ref) - int64(first.Ref))
buf.PutVarint64(h.T - first.T)
buf.PutByte(byte(h.H.CounterResetHint))
buf.PutVarint64(int64(h.H.Schema))
buf.PutBE64(math.Float64bits(h.H.ZeroThreshold))
@ -766,6 +772,8 @@ func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b
buf.PutVarint64(int64(h.Ref) - int64(first.Ref))
buf.PutVarint64(h.T - first.T)
buf.PutByte(byte(h.FH.CounterResetHint))
buf.PutVarint64(int64(h.FH.Schema))
buf.PutBEFloat64(h.FH.ZeroThreshold)
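
With the hint byte written by the encoder and read back by the decoder, the gauge flag now survives a WAL round trip. A minimal sketch, assuming the record and histogram packages as used above:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/record"
)

func main() {
	var enc record.Encoder
	var dec record.Decoder

	in := []record.RefHistogramSample{{
		Ref: 1,
		T:   100,
		H: &histogram.Histogram{
			CounterResetHint: histogram.GaugeType, // the newly persisted byte
			Schema:           1,
			Count:            10,
			Sum:              18.4,
		},
	}}

	out, err := dec.HistogramSamples(enc.HistogramSamples(in, nil), nil)
	if err != nil {
		panic(err)
	}
	// The gauge hint survives WAL replay instead of degrading to unknown.
	fmt.Println(out[0].H.CounterResetHint == histogram.GaugeType) // true
}
```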

View file

@ -165,6 +165,22 @@ func TestRecord_EncodeDecode(t *testing.T) {
decFloatHistograms, err := dec.FloatHistogramSamples(enc.FloatHistogramSamples(floatHistograms, nil), nil)
require.NoError(t, err)
require.Equal(t, floatHistograms, decFloatHistograms)
// Gauge integer histograms.
for i := range histograms {
histograms[i].H.CounterResetHint = histogram.GaugeType
}
decHistograms, err = dec.HistogramSamples(enc.HistogramSamples(histograms, nil), nil)
require.NoError(t, err)
require.Equal(t, histograms, decHistograms)
// Gauge float histograms.
for i := range floatHistograms {
floatHistograms[i].FH.CounterResetHint = histogram.GaugeType
}
decFloatHistograms, err = dec.FloatHistogramSamples(enc.FloatHistogramSamples(floatHistograms, nil), nil)
require.NoError(t, err)
require.Equal(t, floatHistograms, decFloatHistograms)
}
// TestRecord_Corrupted ensures that corrupted records return the correct error.

View file

@ -1018,7 +1018,7 @@ func (r *walReader) next() bool {
// If we reached the end of the reader, advance to the next one
// and close.
// Do not close on the last one as it will still be appended to.
if err == io.EOF {
if errors.Is(err, io.EOF) {
if r.cur == len(r.files)-1 {
return false
}

View file

@ -96,7 +96,7 @@ type LiveReader struct {
// not be used again. It is up to the user to decide when to stop trying should
// io.EOF be returned.
func (r *LiveReader) Err() error {
if r.eofNonErr && r.err == io.EOF {
if r.eofNonErr && errors.Is(r.err, io.EOF) {
return nil
}
return r.err
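
These `errors.Is` conversions matter once an EOF can arrive wrapped (e.g. via `fmt.Errorf("...: %w", err)`); direct equality then fails while `errors.Is` walks the wrap chain. A minimal demonstration:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	// An EOF wrapped by an intermediate layer, e.g. fmt.Errorf with %w.
	err := fmt.Errorf("read segment: %w", io.EOF)

	fmt.Println(err == io.EOF)          // false: equality misses the wrapping
	fmt.Println(errors.Is(err, io.EOF)) // true: errors.Is walks the wrap chain
}
```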

View file

@ -43,7 +43,7 @@ func NewReader(r io.Reader) *Reader {
// It must not be called again after it returned false.
func (r *Reader) Next() bool {
err := r.next()
if errors.Cause(err) == io.EOF {
if errors.Is(err, io.EOF) {
// The last WAL segment record shouldn't be torn (should be full or last).
// The last record would be torn after a crash just before
// the last record part could be persisted to disk.

View file

@ -50,6 +50,7 @@ type WriteTo interface {
Append([]record.RefSample) bool
AppendExemplars([]record.RefExemplar) bool
AppendHistograms([]record.RefHistogramSample) bool
AppendFloatHistograms([]record.RefFloatHistogramSample) bool
StoreSeries([]record.RefSeries, int)
// Next two methods are intended for garbage-collection: first we call
@ -476,13 +477,15 @@ func (w *Watcher) garbageCollectSeries(segmentNum int) error {
// Also used with readCheckpoint - implements segmentReadFn.
func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
var (
dec record.Decoder
series []record.RefSeries
samples []record.RefSample
samplesToSend []record.RefSample
exemplars []record.RefExemplar
histograms []record.RefHistogramSample
histogramsToSend []record.RefHistogramSample
dec record.Decoder
series []record.RefSeries
samples []record.RefSample
samplesToSend []record.RefSample
exemplars []record.RefExemplar
histograms []record.RefHistogramSample
histogramsToSend []record.RefHistogramSample
floatHistograms []record.RefFloatHistogramSample
floatHistogramsToSend []record.RefFloatHistogramSample
)
for r.Next() && !isClosed(w.quit) {
rec := r.Record()
@ -567,7 +570,33 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
w.writer.AppendHistograms(histogramsToSend)
histogramsToSend = histogramsToSend[:0]
}
case record.FloatHistogramSamples:
// Skip if experimental "histograms over remote write" is not enabled.
if !w.sendHistograms {
break
}
if !tail {
break
}
floatHistograms, err := dec.FloatHistogramSamples(rec, floatHistograms[:0])
if err != nil {
w.recordDecodeFailsMetric.Inc()
return err
}
for _, fh := range floatHistograms {
if fh.T > w.startTimestamp {
if !w.sendSamples {
w.sendSamples = true
duration := time.Since(w.startTime)
level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration)
}
floatHistogramsToSend = append(floatHistogramsToSend, fh)
}
}
if len(floatHistogramsToSend) > 0 {
w.writer.AppendFloatHistograms(floatHistogramsToSend)
floatHistogramsToSend = floatHistogramsToSend[:0]
}
case record.Tombstones:
default:

View file

@ -52,11 +52,12 @@ func retry(t *testing.T, interval time.Duration, n int, f func() bool) {
}
type writeToMock struct {
samplesAppended int
exemplarsAppended int
histogramsAppended int
seriesLock sync.Mutex
seriesSegmentIndexes map[chunks.HeadSeriesRef]int
samplesAppended int
exemplarsAppended int
histogramsAppended int
floatHistogramsAppended int
seriesLock sync.Mutex
seriesSegmentIndexes map[chunks.HeadSeriesRef]int
}
func (wtm *writeToMock) Append(s []record.RefSample) bool {
@ -74,6 +75,11 @@ func (wtm *writeToMock) AppendHistograms(h []record.RefHistogramSample) bool {
return true
}
func (wtm *writeToMock) AppendFloatHistograms(fh []record.RefFloatHistogramSample) bool {
wtm.floatHistogramsAppended += len(fh)
return true
}
func (wtm *writeToMock) StoreSeries(series []record.RefSeries, index int) {
wtm.UpdateSeriesSegment(series, index)
}
@ -171,22 +177,31 @@ func TestTailSamples(t *testing.T) {
for j := 0; j < histogramsCount; j++ {
inner := rand.Intn(ref + 1)
hist := &histogram.Histogram{
Schema: 2,
ZeroThreshold: 1e-128,
ZeroCount: 0,
Count: 2,
Sum: 0,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []int64{int64(i) + 1},
NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}},
NegativeBuckets: []int64{int64(-i) - 1},
}
histogram := enc.HistogramSamples([]record.RefHistogramSample{{
Ref: chunks.HeadSeriesRef(inner),
T: now.UnixNano() + 1,
H: &histogram.Histogram{
Schema: 2,
ZeroThreshold: 1e-128,
ZeroCount: 0,
Count: 2,
Sum: 0,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []int64{int64(i) + 1},
NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}},
NegativeBuckets: []int64{int64(-i) - 1},
},
H: hist,
}}, nil)
require.NoError(t, w.Log(histogram))
floatHistogram := enc.FloatHistogramSamples([]record.RefFloatHistogramSample{{
Ref: chunks.HeadSeriesRef(inner),
T: now.UnixNano() + 1,
FH: hist.ToFloat(),
}}, nil)
require.NoError(t, w.Log(floatHistogram))
}
}
@ -221,6 +236,7 @@ func TestTailSamples(t *testing.T) {
require.Equal(t, expectedSamples, wt.samplesAppended, "did not receive the expected number of samples")
require.Equal(t, expectedExemplars, wt.exemplarsAppended, "did not receive the expected number of exemplars")
require.Equal(t, expectedHistograms, wt.histogramsAppended, "did not receive the expected number of histograms")
require.Equal(t, expectedHistograms, wt.floatHistogramsAppended, "did not receive the expected number of float histograms")
})
}
}

View file

@ -26,6 +26,7 @@ import (
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/model/value"
@ -103,6 +104,7 @@ func (h *Handler) federation(w http.ResponseWriter, req *http.Request) {
set := storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge)
it := storage.NewBuffer(int64(h.lookbackDelta / 1e6))
var chkIter chunkenc.Iterator
Loop:
for set.Next() {
s := set.At()
@ -111,18 +113,26 @@ func (h *Handler) federation(w http.ResponseWriter, req *http.Request) {
chkIter = s.Iterator(chkIter)
it.Reset(chkIter)
var t int64
var v float64
var ok bool
var (
t int64
v float64
h *histogram.Histogram
fh *histogram.FloatHistogram
ok bool
)
valueType := it.Seek(maxt)
if valueType == chunkenc.ValFloat {
switch valueType {
case chunkenc.ValFloat:
t, v = it.At()
} else {
// TODO(beorn7): Handle histograms.
t, v, _, ok = it.PeekBack(1)
case chunkenc.ValFloatHistogram, chunkenc.ValHistogram:
t, fh = it.AtFloatHistogram()
default:
t, v, h, fh, ok = it.PeekBack(1)
if !ok {
continue
continue Loop
}
if h != nil {
fh = h.ToFloat()
}
}
// The exposition formats do not support stale markers, so drop them. This
@ -135,7 +145,7 @@ func (h *Handler) federation(w http.ResponseWriter, req *http.Request) {
vec = append(vec, promql.Sample{
Metric: s.Labels(),
Point: promql.Point{T: t, V: v},
Point: promql.Point{T: t, V: v, H: fh},
})
}
if ws := set.Warnings(); len(ws) > 0 {
@ -161,15 +171,22 @@ func (h *Handler) federation(w http.ResponseWriter, req *http.Request) {
sort.Strings(externalLabelNames)
var (
lastMetricName string
protMetricFam *dto.MetricFamily
lastMetricName string
lastWasHistogram, lastHistogramWasGauge bool
protMetricFam *dto.MetricFamily
)
for _, s := range vec {
isHistogram := s.H != nil
if isHistogram &&
format != expfmt.FmtProtoDelim && format != expfmt.FmtProtoText && format != expfmt.FmtProtoCompact {
// Can't serve native histograms in this format.
// TODO(codesome): Serve them once other protocols support native histograms.
continue
}
nameSeen := false
globalUsed := map[string]struct{}{}
protMetric := &dto.Metric{
Untyped: &dto.Untyped{},
}
protMetric := &dto.Metric{}
err := s.Metric.Validate(func(l labels.Label) error {
if l.Value == "" {
@ -179,11 +196,18 @@ func (h *Handler) federation(w http.ResponseWriter, req *http.Request) {
}
if l.Name == labels.MetricName {
nameSeen = true
if l.Value == lastMetricName {
// We already have the name in the current MetricFamily,
// and we ignore nameless metrics.
if l.Value == lastMetricName && // We already have the name in the current MetricFamily, and we ignore nameless metrics.
lastWasHistogram == isHistogram && // The sample type matches (float vs histogram).
// If it was a histogram, the histogram type (counter vs gauge) also matches.
(!isHistogram || lastHistogramWasGauge == (s.H.CounterResetHint == histogram.GaugeType)) {
return nil
}
// Since we now check for the sample type and type of histogram above, we will end up
// creating multiple metric families for the same metric name. This would technically be
// an invalid exposition. But since the consumer of this is Prometheus, and Prometheus can
// parse it fine, we allow it and bend the rules to make federation possible in those cases.
// Need to start a new MetricFamily. Ship off the old one (if any) before
// creating the new one.
if protMetricFam != nil {
@ -195,6 +219,13 @@ func (h *Handler) federation(w http.ResponseWriter, req *http.Request) {
Type: dto.MetricType_UNTYPED.Enum(),
Name: proto.String(l.Value),
}
if isHistogram {
if s.H.CounterResetHint == histogram.GaugeType {
protMetricFam.Type = dto.MetricType_GAUGE_HISTOGRAM.Enum()
} else {
protMetricFam.Type = dto.MetricType_HISTOGRAM.Enum()
}
}
lastMetricName = l.Value
return nil
}
@ -228,9 +259,42 @@ func (h *Handler) federation(w http.ResponseWriter, req *http.Request) {
}
protMetric.TimestampMs = proto.Int64(s.T)
protMetric.Untyped.Value = proto.Float64(s.V)
// TODO(beorn7): Handle histograms.
if !isHistogram {
lastHistogramWasGauge = false
protMetric.Untyped = &dto.Untyped{
Value: proto.Float64(s.V),
}
} else {
lastHistogramWasGauge = s.H.CounterResetHint == histogram.GaugeType
protMetric.Histogram = &dto.Histogram{
SampleCountFloat: proto.Float64(s.H.Count),
SampleSum: proto.Float64(s.H.Sum),
Schema: proto.Int32(s.H.Schema),
ZeroThreshold: proto.Float64(s.H.ZeroThreshold),
ZeroCountFloat: proto.Float64(s.H.ZeroCount),
NegativeCount: s.H.NegativeBuckets,
PositiveCount: s.H.PositiveBuckets,
}
if len(s.H.PositiveSpans) > 0 {
protMetric.Histogram.PositiveSpan = make([]*dto.BucketSpan, len(s.H.PositiveSpans))
for i, sp := range s.H.PositiveSpans {
protMetric.Histogram.PositiveSpan[i] = &dto.BucketSpan{
Offset: proto.Int32(sp.Offset),
Length: proto.Uint32(sp.Length),
}
}
}
if len(s.H.NegativeSpans) > 0 {
protMetric.Histogram.NegativeSpan = make([]*dto.BucketSpan, len(s.H.NegativeSpans))
for i, sp := range s.H.NegativeSpans {
protMetric.Histogram.NegativeSpan[i] = &dto.BucketSpan{
Offset: proto.Int32(sp.Offset),
Length: proto.Uint32(sp.Length),
}
}
}
}
lastWasHistogram = isHistogram
protMetricFam.Metric = append(protMetricFam.Metric, protMetric)
}
// Still have to ship off the last MetricFamily, if any.

View file

@ -16,6 +16,8 @@ package web
import (
"bytes"
"context"
"fmt"
"io"
"net/http"
"net/http/httptest"
"sort"
@ -28,7 +30,9 @@ import (
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/textparse"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb"
@ -299,3 +303,114 @@ func normalizeBody(body *bytes.Buffer) string {
}
return strings.Join(lines, "")
}
func TestFederationWithNativeHistograms(t *testing.T) {
suite, err := promql.NewTest(t, "")
if err != nil {
t.Fatal(err)
}
defer suite.Close()
if err := suite.Run(); err != nil {
t.Fatal(err)
}
var expVec promql.Vector
db := suite.TSDB()
hist := &histogram.Histogram{
Count: 10,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 39.4,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
NegativeBuckets: []int64{1, 1, -1, 0},
}
app := db.Appender(context.Background())
for i := 0; i < 6; i++ {
l := labels.FromStrings("__name__", "test_metric", "foo", fmt.Sprintf("%d", i))
expL := labels.FromStrings("__name__", "test_metric", "instance", "", "foo", fmt.Sprintf("%d", i))
if i%3 == 0 {
_, err = app.Append(0, l, 100*60*1000, float64(i*100))
expVec = append(expVec, promql.Sample{
Point: promql.Point{T: 100 * 60 * 1000, V: float64(i * 100)},
Metric: expL,
})
} else {
hist.ZeroCount++
_, err = app.AppendHistogram(0, l, 100*60*1000, hist.Copy(), nil)
expVec = append(expVec, promql.Sample{
Point: promql.Point{T: 100 * 60 * 1000, H: hist.ToFloat()},
Metric: expL,
})
}
require.NoError(t, err)
}
require.NoError(t, app.Commit())
h := &Handler{
localStorage: &dbAdapter{suite.TSDB()},
lookbackDelta: 5 * time.Minute,
now: func() model.Time { return 101 * 60 * 1000 }, // 101min after epoch.
config: &config.Config{
GlobalConfig: config.GlobalConfig{},
},
}
req := httptest.NewRequest("GET", "http://example.org/federate?match[]=test_metric", nil)
req.Header.Add("Accept", `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited,application/openmetrics-text;version=1.0.0;q=0.8,application/openmetrics-text;version=0.0.1;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1`)
res := httptest.NewRecorder()
h.federation(res, req)
require.Equal(t, http.StatusOK, res.Code)
body, err := io.ReadAll(res.Body)
require.NoError(t, err)
p := textparse.NewProtobufParser(body)
var actVec promql.Vector
metricFamilies := 0
for {
et, err := p.Next()
if err == io.EOF {
break
}
require.NoError(t, err)
if et == textparse.EntryHelp {
metricFamilies++
}
if et == textparse.EntryHistogram || et == textparse.EntrySeries {
l := labels.Labels{}
p.Metric(&l)
actVec = append(actVec, promql.Sample{Metric: l})
}
if et == textparse.EntryHistogram {
_, parsedTimestamp, h, fh := p.Histogram()
require.Nil(t, h)
actVec[len(actVec)-1].Point = promql.Point{
T: *parsedTimestamp,
H: fh,
}
} else if et == textparse.EntrySeries {
_, parsedTimestamp, v := p.Series()
actVec[len(actVec)-1].Point = promql.Point{
T: *parsedTimestamp,
V: v,
}
}
}
// TODO(codesome): Once PromQL is able to set the CounterResetHint on histograms,
// test it with switching histogram types for metric families.
require.Equal(t, 4, metricFamilies)
require.Equal(t, expVec, actVec)
}

View file

@ -16,10 +16,14 @@
- PROMETHEUS_AGENT_MODE is replaced by a boolean indicating if Prometheus is running in agent mode.
If true, it will disable querying capabilities in the UI and generally adapt the UI to the agent mode.
It has to be represented as a string, because booleans can be mangled to !1 in production builds.
- PROMETHEUS_READY is replaced by a boolean indicating whether Prometheus was ready at the time the
web app was served. It has to be represented as a string, because booleans can be mangled to !1 in
production builds.
-->
<script>
const GLOBAL_CONSOLES_LINK='CONSOLES_LINK_PLACEHOLDER';
const GLOBAL_AGENT_MODE='AGENT_MODE_PLACEHOLDER';
const GLOBAL_READY='READY_PLACEHOLDER';
</script>
<!--

View file

@ -18,7 +18,7 @@ import {
} from './pages';
describe('App', () => {
const app = shallow(<App consolesLink={null} agentMode={false} />);
const app = shallow(<App consolesLink={null} agentMode={false} ready={false} />);
it('navigates', () => {
expect(app.find(Navigation)).toHaveLength(1);

View file

@ -5,6 +5,7 @@ import Navigation from './Navbar';
import { BrowserRouter as Router, Redirect, Route, Switch } from 'react-router-dom';
import { PathPrefixContext } from './contexts/PathPrefixContext';
import { ThemeContext, themeName, themeSetting } from './contexts/ThemeContext';
import { ReadyContext } from './contexts/ReadyContext';
import { useLocalStorage } from './hooks/useLocalStorage';
import useMedia from './hooks/useMedia';
import {
@ -24,9 +25,10 @@ import { Theme, themeLocalStorageKey } from './Theme';
interface AppProps {
consolesLink: string | null;
agentMode: boolean;
ready: boolean;
}
const App: FC<AppProps> = ({ consolesLink, agentMode }) => {
const App: FC<AppProps> = ({ consolesLink, agentMode, ready }) => {
// This dynamically/generically determines the pathPrefix by stripping the first known
// endpoint suffix from the window location path. It works out of the box for both direct
// hosting and reverse proxy deployments with no additional configurations required.
@ -72,48 +74,50 @@ const App: FC<AppProps> = ({ consolesLink, agentMode }) => {
>
<Theme />
<PathPrefixContext.Provider value={basePath}>
<Router basename={basePath}>
<Navigation consolesLink={consolesLink} agentMode={agentMode} />
<Container fluid style={{ paddingTop: 70 }}>
<Switch>
<Redirect exact from="/" to={agentMode ? '/agent' : '/graph'} />
{/*
<ReadyContext.Provider value={ready}>
<Router basename={basePath}>
<Navigation consolesLink={consolesLink} agentMode={agentMode} />
<Container fluid style={{ paddingTop: 70 }}>
<Switch>
<Redirect exact from="/" to={agentMode ? '/agent' : '/graph'} />
{/*
NOTE: Any route added here needs to also be added to the list of
React-handled router paths ("reactRouterPaths") in /web/web.go.
*/}
<Route path="/agent">
<AgentPage />
</Route>
<Route path="/graph">
<PanelListPage />
</Route>
<Route path="/alerts">
<AlertsPage />
</Route>
<Route path="/config">
<ConfigPage />
</Route>
<Route path="/flags">
<FlagsPage />
</Route>
<Route path="/rules">
<RulesPage />
</Route>
<Route path="/service-discovery">
<ServiceDiscoveryPage />
</Route>
<Route path="/status">
<StatusPage agentMode={agentMode} />
</Route>
<Route path="/tsdb-status">
<TSDBStatusPage />
</Route>
<Route path="/targets">
<TargetsPage />
</Route>
</Switch>
</Container>
</Router>
<Route path="/agent">
<AgentPage />
</Route>
<Route path="/graph">
<PanelListPage />
</Route>
<Route path="/alerts">
<AlertsPage />
</Route>
<Route path="/config">
<ConfigPage />
</Route>
<Route path="/flags">
<FlagsPage />
</Route>
<Route path="/rules">
<RulesPage />
</Route>
<Route path="/service-discovery">
<ServiceDiscoveryPage />
</Route>
<Route path="/status">
<StatusPage agentMode={agentMode} />
</Route>
<Route path="/tsdb-status">
<TSDBStatusPage />
</Route>
<Route path="/targets">
<TargetsPage />
</Route>
</Switch>
</Container>
</Router>
</ReadyContext.Provider>
</PathPrefixContext.Provider>
</ThemeContext.Provider>
);

View file

@@ -4,6 +4,7 @@ import { Progress, Alert } from 'reactstrap';
import { useFetchReadyInterval } from '../hooks/useFetch';
import { WALReplayData } from '../types/types';
import { usePathPrefix } from '../contexts/PathPrefixContext';
import { useReady } from '../contexts/ReadyContext';
interface StartingContentProps {
isUnexpected: boolean;
@@ -48,8 +49,9 @@
({ ...rest }) => {
const pathPrefix = usePathPrefix();
const { ready, walReplayStatus, isUnexpected } = useFetchReadyInterval(pathPrefix);
const staticReady = useReady();
if (ready || isUnexpected) {
if (staticReady || ready || isUnexpected) {
return <Page {...(rest as T)} />;
}
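With the added check, a bundle served by an already-ready server skips the starting screen outright, while the polling path still covers servers that become ready only after the page loads. A minimal sketch of the same gating pattern in isolation; the poll stub and starting screen below are stand-ins, not the real `useFetchReadyInterval` or `StartingContent` implementations:

```typescript
import React, { FC } from 'react';
import { useReady } from '../contexts/ReadyContext';

// Stand-in for useFetchReadyInterval, which polls readiness at runtime.
const useReadyPollStub = (): { ready: boolean; isUnexpected: boolean } => ({
  ready: false,
  isUnexpected: false,
});

// Stand-in for the starting screen shown while WAL replay is in progress.
const Starting: FC = () => <p>Starting up...</p>;

// Static readiness (baked into the served bundle) short-circuits the poll.
function withGate<P extends object>(Page: FC<P>): FC<P> {
  const Gated: FC<P> = (props) => {
    const staticReady = useReady();
    const { ready, isUnexpected } = useReadyPollStub();
    if (staticReady || ready || isUnexpected) {
      return <Page {...props} />;
    }
    return <Starting />;
  };
  return Gated;
}

export default withGate;
```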

View file

@@ -0,0 +1,9 @@
import React from 'react';
const ReadyContext = React.createContext(false);
function useReady(): boolean {
return React.useContext(ReadyContext);
}
export { useReady, ReadyContext };
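Because the context defaults to `false`, `useReady()` is safe to call outside any provider; it simply reports not-ready. A minimal usage sketch (the component names below are illustrative, not part of this change):

```typescript
import React, { FC } from 'react';
import { ReadyContext, useReady } from './ReadyContext';

// Illustrative consumer: reads the nearest provider value (or the default).
const ReadyBadge: FC = () => {
  const ready = useReady();
  return <span>{ready ? 'ready' : 'starting'}</span>;
};

// The provider is installed once near the app root, as App.tsx does above.
const Example: FC = () => (
  <ReadyContext.Provider value={true}>
    <ReadyBadge />
  </ReadyContext.Provider>
);

export default Example;
```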

View file

@@ -11,9 +11,11 @@ import { isPresent } from './utils';
// Declared/defined in public/index.html, value replaced by Prometheus when serving bundle.
declare const GLOBAL_CONSOLES_LINK: string;
declare const GLOBAL_AGENT_MODE: string;
declare const GLOBAL_READY: string;
let consolesLink: string | null = GLOBAL_CONSOLES_LINK;
const agentMode: string | null = GLOBAL_AGENT_MODE;
const ready: string | null = GLOBAL_READY;
if (
GLOBAL_CONSOLES_LINK === 'CONSOLES_LINK_PLACEHOLDER' ||
@@ -23,4 +25,7 @@ if (
consolesLink = null;
}
ReactDOM.render(<App consolesLink={consolesLink} agentMode={agentMode === 'true'} />, document.getElementById('root'));
ReactDOM.render(
<App consolesLink={consolesLink} agentMode={agentMode === 'true'} ready={ready === 'true'} />,
document.getElementById('root')
);
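The strict string comparison is deliberate: only the literal `'true'` counts as ready, so both `'false'` and a leftover, un-replaced placeholder map to `false`, and the check survives minifiers mangling boolean literals. The same pattern in isolation:

```typescript
// Replaced textually by the Go handler before the bundle is served; if the
// replacement never ran, this still holds the string 'READY_PLACEHOLDER'.
declare const GLOBAL_READY: string;

// Strict equality: anything other than the literal 'true' means not ready.
const ready: boolean = GLOBAL_READY === 'true';

console.log(ready ? 'served by a ready server' : 'served before readiness');
```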

View file

@@ -401,6 +401,7 @@ func New(logger log.Logger, o *Options) *Handler {
replacedIdx := bytes.ReplaceAll(idx, []byte("CONSOLES_LINK_PLACEHOLDER"), []byte(h.consolesPath()))
replacedIdx = bytes.ReplaceAll(replacedIdx, []byte("TITLE_PLACEHOLDER"), []byte(h.options.PageTitle))
replacedIdx = bytes.ReplaceAll(replacedIdx, []byte("AGENT_MODE_PLACEHOLDER"), []byte(strconv.FormatBool(h.options.IsAgent)))
replacedIdx = bytes.ReplaceAll(replacedIdx, []byte("READY_PLACEHOLDER"), []byte(strconv.FormatBool(h.isReady())))
w.Write(replacedIdx)
}