Merge pull request #521 from grafana/prometheus-2023-07-31-76dd9b547

Update Prometheus to 2023-07-31 @ `76dd9b547`
Oleg Zaytsev 2023-08-04 10:06:22 +02:00 committed by GitHub
commit 75ba9622f4
61 changed files with 3299 additions and 920 deletions

View file

@ -3,6 +3,9 @@ run:
skip-files:
# Skip autogenerated files.
- ^.*\.(pb|y)\.go$
skip-dirs:
# Copied it from a different source
- storage/remote/otlptranslator/prometheusremotewrite
output:
sort-results: true

View file

@ -1,5 +1,26 @@
# Changelog
## 2.46.0 / 2023-07-25
* [FEATURE] Promtool: Add PromQL format and label matcher set/delete commands to promtool. #11411
* [FEATURE] Promtool: Add push metrics command. #12299
* [ENHANCEMENT] Promtool: Read from stdin if no filenames are provided in check rules. #12225
* [ENHANCEMENT] Hetzner SD: Support larger IDs that will be used by Hetzner in September. #12569
* [ENHANCEMENT] Kubernetes SD: Add more labels for endpointslice and endpoints role. #10914
* [ENHANCEMENT] Kubernetes SD: Do not add pods to target group if the PodIP status is not set. #11642
* [ENHANCEMENT] OpenStack SD: Include instance image ID in labels. #12502
* [ENHANCEMENT] Remote Write receiver: Validate the metric names and labels. #11688
* [ENHANCEMENT] Web: Initialize `prometheus_http_requests_total` metrics with `code` label set to `200`. #12472
* [ENHANCEMENT] TSDB: Add Zstandard compression option for wlog. #11666
* [ENHANCEMENT] TSDB: Support native histograms in snapshot on shutdown. #12258
* [ENHANCEMENT] Labels: Avoid compiling regexes that are literal. #12434
* [BUGFIX] Histograms: Fix parsing of float histograms without zero bucket. #12577
* [BUGFIX] Histograms: Fix scraping native and classic histograms missing some histograms. #12554
* [BUGFIX] Histograms: Enable ingestion of multiple exemplars per sample. #12557
* [BUGFIX] File SD: Fix path handling in File-SD watcher to allow directory monitoring on Windows. #12488
* [BUGFIX] Linode SD: Cast `InstanceSpec` values to `int64` to avoid overflows on 386 architecture. #12568
* [BUGFIX] PromQL Engine: Include query parsing in active-query tracking. #12418
* [BUGFIX] TSDB: Handle TOC parsing failures. #10623
## 2.45.0 / 2023-06-23

View file

@ -1 +1 @@
2.45.0
2.46.0

View file

@ -170,6 +170,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
case "remote-write-receiver":
c.web.EnableRemoteWriteReceiver = true
level.Warn(logger).Log("msg", "Remote write receiver enabled via feature flag remote-write-receiver. This is DEPRECATED. Use --web.enable-remote-write-receiver.")
case "otlp-write-receiver":
c.web.EnableOTLPWriteReceiver = true
level.Info(logger).Log("msg", "Experimental OTLP write receiver enabled")
case "expand-external-labels":
c.enableExpandExternalLabels = true
level.Info(logger).Log("msg", "Experimental expand-external-labels enabled")
@ -420,7 +423,7 @@ func main() {
a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
Default("").StringsVar(&cfg.featureList)
promlogflag.AddFlags(a, &cfg.promlogConfig)

View file

@ -52,7 +52,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
| <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
| <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
| <code class="text-nowrap">--enable-feature</code> | Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--enable-feature</code> | Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
| <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |

View file

@ -55,6 +55,23 @@ docker run \
prom/prometheus
```
### Save your Prometheus data
Prometheus data is stored in the `/prometheus` directory inside the container, so the data is lost every time the container is recreated. To save your data, you need to set up persistent storage (or bind mounts) for your container.
Run Prometheus container with persistent storage:
```bash
# Create persistent volume for your data
docker volume create prometheus-data
# Start Prometheus container
docker run \
-p 9090:9090 \
-v /path/to/prometheus.yml:/etc/prometheus/prometheus.yml \
-v prometheus-data:/prometheus \
prom/prometheus
```
### Custom image
To avoid managing a file on the host and bind-mount it, the

View file

@ -419,11 +419,10 @@ label_join(up{job="api-server",src1="a",src2="b",src3="c"}, "foo", ",", "src1",
## `label_replace()`
For each timeseries in `v`, `label_replace(v instant-vector, dst_label string, replacement string, src_label string, regex string)`
matches the regular expression `regex` against the value of the label `src_label`. If it
matches the [regular expression](https://github.com/google/re2/wiki/Syntax) `regex` against the value of the label `src_label`. If it
matches, the value of the label `dst_label` in the returned timeseries will be the expansion
of `replacement`, together with the original labels in the input. Capturing groups in the
regular expression can be referenced with `$1`, `$2`, etc. If the regular expression doesn't
match then the timeseries is returned unchanged.
regular expression can be referenced with `$1`, `$2`, etc. Named capturing groups in the regular expression can be referenced with `$name` (where `name` is the capturing group name). If the regular expression doesn't match then the timeseries is returned unchanged.
`label_replace` acts on float and histogram samples in the same way.
@ -433,6 +432,11 @@ This example will return timeseries with the values `a:c` at label `service` and
label_replace(up{job="api-server",service="a:c"}, "foo", "$1", "service", "(.*):.*")
```
This second example has the same effect as the first example, and illustrates the use of named capturing groups:
```
label_replace(up{job="api-server",service="a:c"}, "foo", "$name", "service", "(?P<name>.*):(?P<version>.*)")
```
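Since `label_replace` uses Go's RE2 `regexp` engine, the `$1`/`$name` expansion semantics can be previewed outside PromQL with the standard library. A minimal illustrative sketch (not part of these docs):
```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`(?P<name>.*):(?P<version>.*)`)
	src := "a:c"
	// ExpandString replaces $1, $2, ... and $name in the template with the
	// corresponding capturing groups from the match; this is the same RE2
	// expansion label_replace applies to its replacement argument.
	out := re.ExpandString(nil, "$name", src, re.FindStringSubmatchIndex(src))
	fmt.Println(string(out)) // prints "a"
}
```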
## `ln()`
`ln(v instant-vector)` calculates the natural logarithm for all elements in `v`.

go.mod
View file

@ -56,6 +56,8 @@ require (
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
github.com/stretchr/testify v1.8.4
github.com/vultr/govultr/v2 v2.17.2
go.opentelemetry.io/collector/pdata v0.66.0
go.opentelemetry.io/collector/semconv v0.81.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0
go.opentelemetry.io/otel v1.16.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0
@ -66,6 +68,7 @@ require (
go.uber.org/atomic v1.11.0
go.uber.org/automaxprocs v1.5.2
go.uber.org/goleak v1.2.1
go.uber.org/multierr v1.8.0
golang.org/x/net v0.12.0
golang.org/x/oauth2 v0.10.0
golang.org/x/sync v0.3.0

go.sum
View file

@ -456,7 +456,7 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@ -805,6 +805,10 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/collector/pdata v0.66.0 h1:UdE5U6MsDNzuiWaXdjGx2lC3ElVqWmN/hiUE8vyvSuM=
go.opentelemetry.io/collector/pdata v0.66.0/go.mod h1:pqyaznLzk21m+1KL6fwOsRryRELL+zNM0qiVSn0MbVc=
go.opentelemetry.io/collector/semconv v0.81.0 h1:lCYNNo3powDvFIaTPP2jDKIrBiV1T92NK4QgL/aHYXw=
go.opentelemetry.io/collector/semconv v0.81.0/go.mod h1:TlYPtzvsXyHOgr5eATi43qEMqwSmIziivJB2uctKswo=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0/go.mod h1:XiYsayHc36K3EByOO6nbAXnAWbrUxdjUROCEeeROOH8=
go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s=
@ -828,6 +832,7 @@ go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lI
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME=
@ -836,6 +841,8 @@ go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=

View file

@ -93,26 +93,8 @@ func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram {
Sum: h.Sum,
}
// TODO(beorn7): This is a straight-forward implementation using merging
// iterators for the original buckets and then adding one merged bucket
// after another to the newly created FloatHistogram. It's well possible
// that a more involved implementation performs much better, which we
// could do if this code path turns out to be performance-critical.
var iInSpan, index int32
for iSpan, iBucket, it := -1, -1, h.floatBucketIterator(true, 0, targetSchema); it.Next(); {
b := it.At()
c.PositiveSpans, c.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket(
b, c.PositiveSpans, c.PositiveBuckets, iSpan, iBucket, iInSpan, index,
)
index = b.Index
}
for iSpan, iBucket, it := -1, -1, h.floatBucketIterator(false, 0, targetSchema); it.Next(); {
b := it.At()
c.NegativeSpans, c.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket(
b, c.NegativeSpans, c.NegativeBuckets, iSpan, iBucket, iInSpan, index,
)
index = b.Index
}
c.PositiveSpans, c.PositiveBuckets = mergeToSchema(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema)
c.NegativeSpans, c.NegativeBuckets = mergeToSchema(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema)
return &c
}
@ -818,6 +800,11 @@ type floatBucketIterator struct {
absoluteStartValue float64 // Never return buckets with an upper bound ≤ this value.
}
func (i *floatBucketIterator) At() Bucket[float64] {
// Need to use i.targetSchema rather than i.baseBucketIterator.schema.
return i.baseBucketIterator.at(i.targetSchema)
}
func (i *floatBucketIterator) Next() bool {
if i.spansIdx >= len(i.spans) {
return false
@ -977,3 +964,72 @@ func (i *allFloatBucketIterator) Next() bool {
func (i *allFloatBucketIterator) At() Bucket[float64] {
return i.currBucket
}
// targetIdx returns the bucket index in the target schema for the given bucket
// index idx in the original schema.
func targetIdx(idx, originSchema, targetSchema int32) int32 {
return ((idx - 1) >> (originSchema - targetSchema)) + 1
}
// mergeToSchema is used to merge a FloatHistogram's Spans and Buckets (no matter if
// positive or negative) from the original schema to the target schema.
// The target schema must be smaller than the original schema.
func mergeToSchema(originSpans []Span, originBuckets []float64, originSchema, targetSchema int32) ([]Span, []float64) {
var (
targetSpans []Span // The spans in the target schema.
targetBuckets []float64 // The buckets in the target schema.
bucketIdx int32 // The index of bucket in the origin schema.
lastTargetBucketIdx int32 // The index of the last added target bucket.
origBucketIdx int // The position of a bucket in originBuckets slice.
)
for _, span := range originSpans {
// Determine the index of the first bucket in this span.
bucketIdx += span.Offset
for j := 0; j < int(span.Length); j++ {
// Determine the index of the bucket in the target schema from the index in the original schema.
targetBucketIdx := targetIdx(bucketIdx, originSchema, targetSchema)
switch {
case len(targetSpans) == 0:
// This is the first span in the targetSpans.
span := Span{
Offset: targetBucketIdx,
Length: 1,
}
targetSpans = append(targetSpans, span)
targetBuckets = append(targetBuckets, originBuckets[0])
lastTargetBucketIdx = targetBucketIdx
case lastTargetBucketIdx == targetBucketIdx:
// The current bucket has to be merged into the same target bucket as the previous bucket.
targetBuckets[len(targetBuckets)-1] += originBuckets[origBucketIdx]
case (lastTargetBucketIdx + 1) == targetBucketIdx:
// The current bucket has to go into a new target bucket,
// and that bucket is next to the previous target bucket,
// so we add it to the current target span.
targetSpans[len(targetSpans)-1].Length++
targetBuckets = append(targetBuckets, originBuckets[origBucketIdx])
lastTargetBucketIdx++
case (lastTargetBucketIdx + 1) < targetBucketIdx:
// The current bucket has to go into a new target bucket,
// and that bucket is separated by a gap from the previous target bucket,
// so we need to add a new target span.
span := Span{
Offset: targetBucketIdx - lastTargetBucketIdx - 1,
Length: 1,
}
targetSpans = append(targetSpans, span)
targetBuckets = append(targetBuckets, originBuckets[origBucketIdx])
lastTargetBucketIdx = targetBucketIdx
}
bucketIdx++
origBucketIdx++
}
}
return targetSpans, targetBuckets
}
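To make the shift arithmetic concrete: when merging from schema 1 down to schema 0, `targetIdx` maps each pair of adjacent origin buckets onto one target bucket. An illustrative check (a sketch, assuming it runs inside this package with `fmt` imported):
```go
// Each decrement of the schema halves the resolution, so two adjacent
// origin buckets collapse into one target bucket.
for _, idx := range []int32{-1, 0, 1, 2, 3, 4} {
	fmt.Printf("origin idx %d -> target idx %d\n", idx, targetIdx(idx, 1, 0))
}
// origin idx -1 -> target idx 0
// origin idx 0 -> target idx 0
// origin idx 1 -> target idx 1
// origin idx 2 -> target idx 1
// origin idx 3 -> target idx 2
// origin idx 4 -> target idx 2
```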

View file

@ -2205,3 +2205,50 @@ func TestAllReverseFloatBucketIterator(t *testing.T) {
})
}
}
func TestFloatBucketIteratorTargetSchema(t *testing.T) {
h := FloatHistogram{
Count: 405,
Sum: 1008.4,
Schema: 1,
PositiveSpans: []Span{
{Offset: 0, Length: 4},
{Offset: 1, Length: 3},
{Offset: 2, Length: 3},
},
PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33},
NegativeSpans: []Span{
{Offset: 0, Length: 3},
{Offset: 7, Length: 4},
{Offset: 1, Length: 3},
},
NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33},
}
expPositiveBuckets := []Bucket[float64]{
{Lower: 0.25, Upper: 1, LowerInclusive: false, UpperInclusive: true, Count: 100, Index: 0},
{Lower: 1, Upper: 4, LowerInclusive: false, UpperInclusive: true, Count: 522, Index: 1},
{Lower: 4, Upper: 16, LowerInclusive: false, UpperInclusive: true, Count: 68, Index: 2},
{Lower: 16, Upper: 64, LowerInclusive: false, UpperInclusive: true, Count: 322, Index: 3},
}
expNegativeBuckets := []Bucket[float64]{
{Lower: -1, Upper: -0.25, LowerInclusive: true, UpperInclusive: false, Count: 10, Index: 0},
{Lower: -4, Upper: -1, LowerInclusive: true, UpperInclusive: false, Count: 1264, Index: 1},
{Lower: -64, Upper: -16, LowerInclusive: true, UpperInclusive: false, Count: 184, Index: 3},
{Lower: -256, Upper: -64, LowerInclusive: true, UpperInclusive: false, Count: 791, Index: 4},
{Lower: -1024, Upper: -256, LowerInclusive: true, UpperInclusive: false, Count: 33, Index: 5},
}
it := h.floatBucketIterator(true, 0, -1)
for i, b := range expPositiveBuckets {
require.True(t, it.Next(), "positive iterator exhausted too early")
require.Equal(t, b, it.At(), "bucket %d", i)
}
require.False(t, it.Next(), "positive iterator not exhausted")
it = h.floatBucketIterator(false, 0, -1)
for i, b := range expNegativeBuckets {
require.True(t, it.Next(), "negative iterator exhausted too early")
require.Equal(t, b, it.At(), "bucket %d", i)
}
require.False(t, it.Next(), "negative iterator not exhausted")
}

View file

@ -102,16 +102,22 @@ type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct {
}
func (b baseBucketIterator[BC, IBC]) At() Bucket[BC] {
return b.at(b.schema)
}
// at is an internal version of the exported At to enable using a different
// schema.
func (b baseBucketIterator[BC, IBC]) at(schema int32) Bucket[BC] {
bucket := Bucket[BC]{
Count: BC(b.currCount),
Index: b.currIdx,
}
if b.positive {
bucket.Upper = getBound(b.currIdx, b.schema)
bucket.Lower = getBound(b.currIdx-1, b.schema)
bucket.Upper = getBound(b.currIdx, schema)
bucket.Lower = getBound(b.currIdx-1, schema)
} else {
bucket.Lower = -getBound(b.currIdx, b.schema)
bucket.Upper = -getBound(b.currIdx-1, b.schema)
bucket.Lower = -getBound(b.currIdx, schema)
bucket.Upper = -getBound(b.currIdx-1, schema)
}
bucket.LowerInclusive = bucket.Lower < 0
bucket.UpperInclusive = bucket.Upper > 0

View file

@ -59,7 +59,9 @@ type Parser interface {
Metric(l *labels.Labels) string
// Exemplar writes the exemplar of the current sample into the passed
// exemplar. It returns if an exemplar exists or not.
// exemplar. It can be called repeatedly to retrieve multiple exemplars
// for the same sample. It returns false once all exemplars are
// retrieved (including the case where no exemplars exist at all).
Exemplar(l *exemplar.Exemplar) bool
// Next advances the parser to the next sample. It returns false if no

View file

@ -174,8 +174,10 @@ func (p *OpenMetricsParser) Metric(l *labels.Labels) string {
return s
}
// Exemplar writes the exemplar of the current sample into the passed
// exemplar. It returns the whether an exemplar exists.
// Exemplar writes the exemplar of the current sample into the passed exemplar.
// It returns whether an exemplar exists. As OpenMetrics only ever has one
// exemplar per sample, every call after the first (for the same sample) will
// always return false.
func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool {
if len(p.exemplar) == 0 {
return false
@ -204,6 +206,8 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool {
p.builder.Sort()
e.Labels = p.builder.Labels()
// Wipe exemplar so that future calls return false.
p.exemplar = p.exemplar[:0]
return true
}

View file

@ -105,7 +105,7 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) {
v = float64(s.GetSampleCount())
case -1:
v = s.GetSampleSum()
// Need to detect a summaries without quantile here.
// Need to detect summaries without quantile here.
if len(s.GetQuantile()) == 0 {
p.fieldsDone = true
}
@ -554,18 +554,17 @@ func formatOpenMetricsFloat(f float64) string {
return s + ".0"
}
// isNativeHistogram returns false iff the provided histograms has no sparse
// buckets and a zero threshold of 0 and a zero count of 0. In principle, this
// could still be meant to be a native histogram (with a zero threshold of 0 and
// no observations yet), but for now, we'll treat this case as a conventional
// histogram.
//
// TODO(beorn7): In the final format, there should be an unambiguous way of
// deciding if a histogram should be ingested as a conventional one or a native
// one.
// isNativeHistogram returns false iff the provided histogram has no spans at
// all (neither positive nor negative) and a zero threshold of 0 and a zero
// count of 0. In principle, this could still be meant to be a native histogram
// with a zero threshold of 0 and no observations yet. In that case,
// instrumentation libraries should add a "no-op" span (e.g. length zero, offset
// zero) to signal that the histogram is meant to be parsed as a native
// histogram. Failing to do so will cause Prometheus to parse it as a classic
// histogram as long as no observations have happened.
func isNativeHistogram(h *dto.Histogram) bool {
return len(h.GetNegativeDelta()) > 0 ||
len(h.GetPositiveDelta()) > 0 ||
h.GetZeroCount() > 0 ||
h.GetZeroThreshold() > 0
return len(h.GetPositiveSpan()) > 0 ||
len(h.GetNegativeSpan()) > 0 ||
h.GetZeroThreshold() > 0 ||
h.GetZeroCount() > 0
}
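With the new check, an instrumentation library can mark an observation-free histogram (zero threshold 0, zero count 0) as native by emitting a no-op span. A hypothetical snippet, assuming the `dto` alias for `github.com/prometheus/prometheus/prompb/io/prometheus/client` used by this file:
```go
// No observations, zero_threshold 0 and zero_count 0, but the no-op span
// makes the parser treat this as a native histogram rather than a classic one.
h := &dto.Histogram{
	PositiveSpan: []dto.BucketSpan{{Offset: 0, Length: 0}},
}
fmt.Println(isNativeHistogram(h)) // true
```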

View file

@ -463,6 +463,24 @@ metric: <
>
>
`,
`name: "test_float_histogram_with_zerothreshold_zero"
help: "Test float histogram with a zero threshold of zero."
type: HISTOGRAM
metric: <
histogram: <
sample_count_float: 5.0
sample_sum: 12.1
schema: 3
positive_span: <
offset: 8
length: 2
>
positive_count: 2.0
positive_count: 3.0
>
>
`,
`name: "rpc_durations_seconds"
help: "RPC latency distributions."
@ -499,6 +517,19 @@ metric: <
sample_sum: 1.234
>
>
`,
`name: "empty_histogram"
help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram."
type: HISTOGRAM
metric: <
histogram: <
positive_span: <
offset: 0
length: 0
>
>
>
`,
}
@ -850,6 +881,30 @@ func TestProtobufParse(t *testing.T) {
"foo", "baz",
),
},
{
m: "test_float_histogram_with_zerothreshold_zero",
help: "Test float histogram with a zero threshold of zero.",
},
{
m: "test_float_histogram_with_zerothreshold_zero",
typ: MetricTypeHistogram,
},
{
m: "test_float_histogram_with_zerothreshold_zero",
fhs: &histogram.FloatHistogram{
Count: 5.0,
Sum: 12.1,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: 8, Length: 2},
},
PositiveBuckets: []float64{2.0, 3.0},
NegativeSpans: []histogram.Span{},
},
lset: labels.FromStrings(
"__name__", "test_float_histogram_with_zerothreshold_zero",
),
},
{
m: "rpc_durations_seconds",
help: "RPC latency distributions.",
@ -923,6 +978,25 @@ func TestProtobufParse(t *testing.T) {
"__name__", "without_quantiles_sum",
),
},
{
m: "empty_histogram",
help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.",
},
{
m: "empty_histogram",
typ: MetricTypeHistogram,
},
{
m: "empty_histogram",
shs: &histogram.Histogram{
CounterResetHint: histogram.UnknownCounterReset,
PositiveSpans: []histogram.Span{},
NegativeSpans: []histogram.Span{},
},
lset: labels.FromStrings(
"__name__", "empty_histogram",
),
},
},
},
{
@ -1550,14 +1624,38 @@ func TestProtobufParse(t *testing.T) {
),
},
{ // 67
m: "test_float_histogram_with_zerothreshold_zero",
help: "Test float histogram with a zero threshold of zero.",
},
{ // 68
m: "test_float_histogram_with_zerothreshold_zero",
typ: MetricTypeHistogram,
},
{ // 69
m: "test_float_histogram_with_zerothreshold_zero",
fhs: &histogram.FloatHistogram{
Count: 5.0,
Sum: 12.1,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: 8, Length: 2},
},
PositiveBuckets: []float64{2.0, 3.0},
NegativeSpans: []histogram.Span{},
},
lset: labels.FromStrings(
"__name__", "test_float_histogram_with_zerothreshold_zero",
),
},
{ // 70
m: "rpc_durations_seconds",
help: "RPC latency distributions.",
},
{ // 68
{ // 71
m: "rpc_durations_seconds",
typ: MetricTypeSummary,
},
{ // 69
{ // 72
m: "rpc_durations_seconds_count\xffservice\xffexponential",
v: 262,
lset: labels.FromStrings(
@ -1565,7 +1663,7 @@ func TestProtobufParse(t *testing.T) {
"service", "exponential",
),
},
{ // 70
{ // 73
m: "rpc_durations_seconds_sum\xffservice\xffexponential",
v: 0.00025551262820703587,
lset: labels.FromStrings(
@ -1573,7 +1671,7 @@ func TestProtobufParse(t *testing.T) {
"service", "exponential",
),
},
{ // 71
{ // 74
m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.5",
v: 6.442786329648548e-07,
lset: labels.FromStrings(
@ -1582,7 +1680,7 @@ func TestProtobufParse(t *testing.T) {
"service", "exponential",
),
},
{ // 72
{ // 75
m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.9",
v: 1.9435742936658396e-06,
lset: labels.FromStrings(
@ -1591,7 +1689,7 @@ func TestProtobufParse(t *testing.T) {
"service", "exponential",
),
},
{ // 73
{ // 76
m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.99",
v: 4.0471608667037015e-06,
lset: labels.FromStrings(
@ -1600,28 +1698,47 @@ func TestProtobufParse(t *testing.T) {
"service", "exponential",
),
},
{ // 74
{ // 77
m: "without_quantiles",
help: "A summary without quantiles.",
},
{ // 75
{ // 78
m: "without_quantiles",
typ: MetricTypeSummary,
},
{ // 76
{ // 79
m: "without_quantiles_count",
v: 42,
lset: labels.FromStrings(
"__name__", "without_quantiles_count",
),
},
{ // 77
{ // 80
m: "without_quantiles_sum",
v: 1.234,
lset: labels.FromStrings(
"__name__", "without_quantiles_sum",
),
},
{ // 78
m: "empty_histogram",
help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.",
},
{ // 79
m: "empty_histogram",
typ: MetricTypeHistogram,
},
{ // 80
m: "empty_histogram",
shs: &histogram.Histogram{
CounterResetHint: histogram.UnknownCounterReset,
PositiveSpans: []histogram.Span{},
NegativeSpans: []histogram.Span{},
},
lset: labels.FromStrings(
"__name__", "empty_histogram",
),
},
},
},
}

View file

@ -414,6 +414,9 @@ type Histogram struct {
NegativeDelta []int64 `protobuf:"zigzag64,10,rep,packed,name=negative_delta,json=negativeDelta,proto3" json:"negative_delta,omitempty"`
NegativeCount []float64 `protobuf:"fixed64,11,rep,packed,name=negative_count,json=negativeCount,proto3" json:"negative_count,omitempty"`
// Positive buckets for the native histogram.
// Use a no-op span (offset 0, length 0) for a native histogram without any
// observations yet and with a zero_threshold of 0. Otherwise, it would be
// indistinguishable from a classic histogram.
PositiveSpan []BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan,proto3" json:"positive_span"`
// Use either "positive_delta" or "positive_count", the former for
// regular histograms with integer counts, the latter for float

View file

@ -97,6 +97,9 @@ message Histogram {
repeated double negative_count = 11; // Absolute count of each bucket.
// Positive buckets for the native histogram.
// Use a no-op span (offset 0, length 0) for a native histogram without any
// observations yet and with a zero_threshold of 0. Otherwise, it would be
// indistinguishable from a classic histogram.
repeated BucketSpan positive_span = 12 [(gogoproto.nullable) = false];
// Use either "positive_delta" or "positive_count", the former for
// regular histograms with integer counts, the latter for float

View file

@ -2876,7 +2876,7 @@ func btos(b bool) float64 {
// result of the op operation.
func shouldDropMetricName(op parser.ItemType) bool {
switch op {
case parser.ADD, parser.SUB, parser.DIV, parser.MUL, parser.POW, parser.MOD:
case parser.ADD, parser.SUB, parser.DIV, parser.MUL, parser.POW, parser.MOD, parser.ATAN2:
return true
default:
return false

View file

@ -477,10 +477,10 @@ load 5m
trigNaN{} NaN
eval instant at 5m trigy atan2 trigx
trigy{} 0.4636476090008061
{} 0.4636476090008061
eval instant at 5m trigy atan2 trigNaN
trigy{} NaN
{} NaN
eval instant at 5m 10 atan2 20
0.4636476090008061

View file

@ -53,10 +53,10 @@ func (a nopAppender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.M
func (a nopAppender) Commit() error { return nil }
func (a nopAppender) Rollback() error { return nil }
type sample struct {
type floatSample struct {
metric labels.Labels
t int64
v float64
f float64
}
type histogramSample struct {
@ -69,23 +69,23 @@ type histogramSample struct {
// It can be used as its zero value or be backed by another appender it writes samples through.
type collectResultAppender struct {
next storage.Appender
result []sample
pendingResult []sample
rolledbackResult []sample
pendingExemplars []exemplar.Exemplar
resultExemplars []exemplar.Exemplar
resultFloats []floatSample
pendingFloats []floatSample
rolledbackFloats []floatSample
resultHistograms []histogramSample
pendingHistograms []histogramSample
rolledbackHistograms []histogramSample
pendingMetadata []metadata.Metadata
resultExemplars []exemplar.Exemplar
pendingExemplars []exemplar.Exemplar
resultMetadata []metadata.Metadata
pendingMetadata []metadata.Metadata
}
func (a *collectResultAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
a.pendingResult = append(a.pendingResult, sample{
a.pendingFloats = append(a.pendingFloats, floatSample{
metric: lset,
t: t,
v: v,
f: v,
})
if ref == 0 {
@ -133,11 +133,11 @@ func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.L
}
func (a *collectResultAppender) Commit() error {
a.result = append(a.result, a.pendingResult...)
a.resultFloats = append(a.resultFloats, a.pendingFloats...)
a.resultExemplars = append(a.resultExemplars, a.pendingExemplars...)
a.resultHistograms = append(a.resultHistograms, a.pendingHistograms...)
a.resultMetadata = append(a.resultMetadata, a.pendingMetadata...)
a.pendingResult = nil
a.pendingFloats = nil
a.pendingExemplars = nil
a.pendingHistograms = nil
a.pendingMetadata = nil
@ -148,9 +148,9 @@ func (a *collectResultAppender) Commit() error {
}
func (a *collectResultAppender) Rollback() error {
a.rolledbackResult = a.pendingResult
a.rolledbackFloats = a.pendingFloats
a.rolledbackHistograms = a.pendingHistograms
a.pendingResult = nil
a.pendingFloats = nil
a.pendingHistograms = nil
if a.next == nil {
return nil
@ -160,14 +160,14 @@ func (a *collectResultAppender) Rollback() error {
func (a *collectResultAppender) String() string {
var sb strings.Builder
for _, s := range a.result {
sb.WriteString(fmt.Sprintf("committed: %s %f %d\n", s.metric, s.v, s.t))
for _, s := range a.resultFloats {
sb.WriteString(fmt.Sprintf("committed: %s %f %d\n", s.metric, s.f, s.t))
}
for _, s := range a.pendingResult {
sb.WriteString(fmt.Sprintf("pending: %s %f %d\n", s.metric, s.v, s.t))
for _, s := range a.pendingFloats {
sb.WriteString(fmt.Sprintf("pending: %s %f %d\n", s.metric, s.f, s.t))
}
for _, s := range a.rolledbackResult {
sb.WriteString(fmt.Sprintf("rolledback: %s %f %d\n", s.metric, s.v, s.t))
for _, s := range a.rolledbackFloats {
sb.WriteString(fmt.Sprintf("rolledback: %s %f %d\n", s.metric, s.f, s.t))
}
return sb.String()
}

View file

@ -1685,7 +1685,7 @@ loop:
// number of samples remaining after relabeling.
added++
if hasExemplar := p.Exemplar(&e); hasExemplar {
for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) {
if !e.HasTs {
e.Ts = t
}
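The changed condition above is the whole fix: where the old code asked for at most one exemplar, the loop now drains the parser. Schematically, the contract from the updated `Parser.Exemplar` documentation boils down to this pattern (a sketch, with `appendExemplar` as a hypothetical consumer):
```go
var e exemplar.Exemplar
// Exemplar may now be called repeatedly for the same sample; it returns
// false once every exemplar has been retrieved (or none existed at all).
for p.Exemplar(&e) {
	if !e.HasTs {
		e.Ts = t // fall back to the sample timestamp, as above
	}
	appendExemplar(e) // hypothetical consumer
}
```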

View file

@ -17,6 +17,7 @@ import (
"bytes"
"compress/gzip"
"context"
"encoding/binary"
"fmt"
"io"
"math"
@ -29,6 +30,7 @@ import (
"time"
"github.com/go-kit/log"
"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
@ -39,6 +41,7 @@ import (
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel"
"github.com/prometheus/prometheus/model/textparse"
@ -737,12 +740,12 @@ func TestScrapeLoopStop(t *testing.T) {
// We expected 1 actual sample for each scrape plus 5 for report samples.
// At least 2 scrapes were made, plus the final stale markers.
if len(appender.result) < 6*3 || len(appender.result)%6 != 0 {
t.Fatalf("Expected at least 3 scrapes with 6 samples each, got %d samples", len(appender.result))
if len(appender.resultFloats) < 6*3 || len(appender.resultFloats)%6 != 0 {
t.Fatalf("Expected at least 3 scrapes with 6 samples each, got %d samples", len(appender.resultFloats))
}
// All samples in a scrape must have the same timestamp.
var ts int64
for i, s := range appender.result {
for i, s := range appender.resultFloats {
switch {
case i%6 == 0:
ts = s.t
@ -751,9 +754,9 @@ func TestScrapeLoopStop(t *testing.T) {
}
}
// All samples from the last scrape must be stale markers.
for _, s := range appender.result[len(appender.result)-5:] {
if !value.IsStaleNaN(s.v) {
t.Fatalf("Appended last sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(s.v))
for _, s := range appender.resultFloats[len(appender.resultFloats)-5:] {
if !value.IsStaleNaN(s.f) {
t.Fatalf("Appended last sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(s.f))
}
}
}
@ -1189,10 +1192,10 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
// 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
// each scrape successful or not.
require.Equal(t, 27, len(appender.result), "Appended samples not as expected:\n%s", appender)
require.Equal(t, 42.0, appender.result[0].v, "Appended first sample not as expected")
require.True(t, value.IsStaleNaN(appender.result[6].v),
"Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.result[6].v))
require.Equal(t, 27, len(appender.resultFloats), "Appended samples not as expected:\n%s", appender)
require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected")
require.True(t, value.IsStaleNaN(appender.resultFloats[6].f),
"Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f))
}
func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
@ -1254,10 +1257,10 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
// 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
// each scrape successful or not.
require.Equal(t, 17, len(appender.result), "Appended samples not as expected:\n%s", appender)
require.Equal(t, 42.0, appender.result[0].v, "Appended first sample not as expected")
require.True(t, value.IsStaleNaN(appender.result[6].v),
"Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.result[6].v))
require.Equal(t, 17, len(appender.resultFloats), "Appended samples not as expected:\n%s", appender)
require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected")
require.True(t, value.IsStaleNaN(appender.resultFloats[6].f),
"Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f))
}
func TestScrapeLoopCache(t *testing.T) {
@ -1339,7 +1342,7 @@ func TestScrapeLoopCache(t *testing.T) {
// 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
// each scrape successful or not.
require.Equal(t, 26, len(appender.result), "Appended samples not as expected:\n%s", appender)
require.Equal(t, 26, len(appender.resultFloats), "Appended samples not as expected:\n%s", appender)
}
func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
@ -1498,11 +1501,11 @@ func TestScrapeLoopAppend(t *testing.T) {
require.NoError(t, err)
require.NoError(t, slApp.Commit())
expected := []sample{
expected := []floatSample{
{
metric: test.expLset,
t: timestamp.FromTime(now),
v: test.expValue,
f: test.expValue,
},
}
@ -1510,11 +1513,11 @@ func TestScrapeLoopAppend(t *testing.T) {
// DeepEqual will report NaNs as being different,
// so replace it with the expected one.
if test.expValue == float64(value.NormalNaN) {
app.result[0].v = expected[0].v
app.resultFloats[0].f = expected[0].f
}
t.Logf("Test:%s", test.title)
require.Equal(t, expected, app.result)
require.Equal(t, expected, app.resultFloats)
}
}
@ -1584,13 +1587,13 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
require.NoError(t, slApp.Commit())
require.Equal(t, []sample{
require.Equal(t, []floatSample{
{
metric: labels.FromStrings(tc.expected...),
t: timestamp.FromTime(time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC)),
v: 0,
f: 0,
},
}, app.result)
}, app.resultFloats)
})
}
}
@ -1638,15 +1641,15 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
require.NoError(t, err)
require.NoError(t, slApp.Commit())
expected := []sample{
expected := []floatSample{
{
metric: lset,
t: timestamp.FromTime(now),
v: expValue,
f: expValue,
},
}
require.Equal(t, expected, app.result)
require.Equal(t, expected, app.resultFloats)
}
func TestScrapeLoopAppendSampleLimit(t *testing.T) {
@ -1706,14 +1709,14 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
require.Equal(t, 1.0, change, "Unexpected change of sample limit metric: %f", change)
// And verify that we got the samples that fit under the limit.
want := []sample{
want := []floatSample{
{
metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
t: timestamp.FromTime(now),
v: 1,
f: 1,
},
}
require.Equal(t, want, resApp.rolledbackResult, "Appended samples not as expected:\n%s", appender)
require.Equal(t, want, resApp.rolledbackFloats, "Appended samples not as expected:\n%s", appender)
now = time.Now()
slApp = sl.appender(context.Background())
@ -1866,19 +1869,19 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) {
require.NoError(t, slApp.Commit())
// DeepEqual will report NaNs as being different, so replace with a different value.
want := []sample{
want := []floatSample{
{
metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
t: timestamp.FromTime(now),
v: 1,
f: 1,
},
{
metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
t: timestamp.FromTime(now.Add(time.Minute)),
v: 2,
f: 2,
},
}
require.Equal(t, want, capp.result, "Appended samples not as expected:\n%s", appender)
require.Equal(t, want, capp.resultFloats, "Appended samples not as expected:\n%s", appender)
}
func TestScrapeLoopAppendStaleness(t *testing.T) {
@ -1914,24 +1917,24 @@ func TestScrapeLoopAppendStaleness(t *testing.T) {
require.NoError(t, err)
require.NoError(t, slApp.Commit())
ingestedNaN := math.Float64bits(app.result[1].v)
ingestedNaN := math.Float64bits(app.resultFloats[1].f)
require.Equal(t, value.StaleNaN, ingestedNaN, "Appended stale sample wasn't as expected")
// DeepEqual will report NaNs as being different, so replace with a different value.
app.result[1].v = 42
want := []sample{
app.resultFloats[1].f = 42
want := []floatSample{
{
metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
t: timestamp.FromTime(now),
v: 1,
f: 1,
},
{
metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
t: timestamp.FromTime(now.Add(time.Second)),
v: 42,
f: 42,
},
}
require.Equal(t, want, app.result, "Appended samples not as expected:\n%s", appender)
require.Equal(t, want, app.resultFloats, "Appended samples not as expected:\n%s", appender)
}
func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
@ -1966,40 +1969,44 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
require.NoError(t, err)
require.NoError(t, slApp.Commit())
want := []sample{
want := []floatSample{
{
metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
t: 1000,
v: 1,
f: 1,
},
}
require.Equal(t, want, app.result, "Appended samples not as expected:\n%s", appender)
require.Equal(t, want, app.resultFloats, "Appended samples not as expected:\n%s", appender)
}
func TestScrapeLoopAppendExemplar(t *testing.T) {
tests := []struct {
title string
scrapeText string
contentType string
discoveryLabels []string
samples []sample
floats []floatSample
histograms []histogramSample
exemplars []exemplar.Exemplar
}{
{
title: "Metric without exemplars",
scrapeText: "metric_total{n=\"1\"} 0\n# EOF",
contentType: "application/openmetrics-text",
discoveryLabels: []string{"n", "2"},
samples: []sample{{
floats: []floatSample{{
metric: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"),
v: 0,
f: 0,
}},
},
{
title: "Metric with exemplars",
scrapeText: "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0\n# EOF",
contentType: "application/openmetrics-text",
discoveryLabels: []string{"n", "2"},
samples: []sample{{
floats: []floatSample{{
metric: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"),
v: 0,
f: 0,
}},
exemplars: []exemplar.Exemplar{
{Labels: labels.FromStrings("a", "abc"), Value: 1},
@ -2008,10 +2015,11 @@ func TestScrapeLoopAppendExemplar(t *testing.T) {
{
title: "Metric with exemplars and TS",
scrapeText: "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0 10000\n# EOF",
contentType: "application/openmetrics-text",
discoveryLabels: []string{"n", "2"},
samples: []sample{{
floats: []floatSample{{
metric: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"),
v: 0,
f: 0,
}},
exemplars: []exemplar.Exemplar{
{Labels: labels.FromStrings("a", "abc"), Value: 1, Ts: 10000000, HasTs: true},
@ -2022,18 +2030,117 @@ func TestScrapeLoopAppendExemplar(t *testing.T) {
scrapeText: `metric_total{n="1"} 1 # {t="1"} 1.0 10000
metric_total{n="2"} 2 # {t="2"} 2.0 20000
# EOF`,
samples: []sample{{
contentType: "application/openmetrics-text",
floats: []floatSample{{
metric: labels.FromStrings("__name__", "metric_total", "n", "1"),
v: 1,
f: 1,
}, {
metric: labels.FromStrings("__name__", "metric_total", "n", "2"),
v: 2,
f: 2,
}},
exemplars: []exemplar.Exemplar{
{Labels: labels.FromStrings("t", "1"), Value: 1, Ts: 10000000, HasTs: true},
{Labels: labels.FromStrings("t", "2"), Value: 2, Ts: 20000000, HasTs: true},
},
},
{
title: "Native histogram with two exemplars",
scrapeText: `name: "test_histogram"
help: "Test histogram with many buckets removed to keep it manageable in size."
type: HISTOGRAM
metric: <
histogram: <
sample_count: 175
sample_sum: 0.0008280461746287094
bucket: <
cumulative_count: 2
upper_bound: -0.0004899999999999998
>
bucket: <
cumulative_count: 4
upper_bound: -0.0003899999999999998
exemplar: <
label: <
name: "dummyID"
value: "59727"
>
value: -0.00039
timestamp: <
seconds: 1625851155
nanos: 146848499
>
>
>
bucket: <
cumulative_count: 16
upper_bound: -0.0002899999999999998
exemplar: <
label: <
name: "dummyID"
value: "5617"
>
value: -0.00029
>
>
schema: 3
zero_threshold: 2.938735877055719e-39
zero_count: 2
negative_span: <
offset: -162
length: 1
>
negative_span: <
offset: 23
length: 4
>
negative_delta: 1
negative_delta: 3
negative_delta: -2
negative_delta: -1
negative_delta: 1
positive_span: <
offset: -161
length: 1
>
positive_span: <
offset: 8
length: 3
>
positive_delta: 1
positive_delta: 2
positive_delta: -1
positive_delta: -1
>
timestamp_ms: 1234568
>
`,
contentType: "application/vnd.google.protobuf",
histograms: []histogramSample{{
t: 1234568,
h: &histogram.Histogram{
Count: 175,
ZeroCount: 2,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []int64{1, 2, -1, -1},
NegativeBuckets: []int64{1, 3, -2, -1, 1},
},
}},
exemplars: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true},
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, Ts: 1234568, HasTs: false},
},
},
}
for _, test := range tests {
@ -2069,8 +2176,8 @@ metric_total{n="2"} 2 # {t="2"} 2.0 20000
now := time.Now()
for i := range test.samples {
test.samples[i].t = timestamp.FromTime(now)
for i := range test.floats {
test.floats[i].t = timestamp.FromTime(now)
}
// We need to set the timestamp for expected exemplars that do not have a timestamp.
@ -2080,10 +2187,29 @@ metric_total{n="2"} 2 # {t="2"} 2.0 20000
}
}
_, _, _, err := sl.append(app, []byte(test.scrapeText), "application/openmetrics-text", now)
buf := &bytes.Buffer{}
if test.contentType == "application/vnd.google.protobuf" {
// In case of protobuf, we have to create the binary representation.
pb := &dto.MetricFamily{}
// From text to proto message.
require.NoError(t, proto.UnmarshalText(test.scrapeText, pb))
// From proto message to binary protobuf.
protoBuf, err := proto.Marshal(pb)
require.NoError(t, err)
// Write first length, then binary protobuf.
varintBuf := binary.AppendUvarint(nil, uint64(len(protoBuf)))
buf.Write(varintBuf)
buf.Write(protoBuf)
} else {
buf.WriteString(test.scrapeText)
}
_, _, _, err := sl.append(app, buf.Bytes(), test.contentType, now)
require.NoError(t, err)
require.NoError(t, app.Commit())
require.Equal(t, test.samples, app.result)
require.Equal(t, test.floats, app.resultFloats)
require.Equal(t, test.histograms, app.resultHistograms)
require.Equal(t, test.exemplars, app.resultExemplars)
})
}
@ -2093,12 +2219,12 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) {
scrapeText := []string{`metric_total{n="1"} 1 # {t="1"} 1.0 10000
# EOF`, `metric_total{n="1"} 2 # {t="2"} 2.0 20000
# EOF`}
samples := []sample{{
samples := []floatSample{{
metric: labels.FromStrings("__name__", "metric_total", "n", "1"),
v: 1,
f: 1,
}, {
metric: labels.FromStrings("__name__", "metric_total", "n", "1"),
v: 2,
f: 2,
}}
exemplars := []exemplar.Exemplar{
{Labels: labels.FromStrings("t", "1"), Value: 1, Ts: 10000000, HasTs: true},
@ -2154,7 +2280,7 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) {
require.NoError(t, app.Commit())
}
require.Equal(t, samples, app.result)
require.Equal(t, samples, app.resultFloats)
require.Equal(t, exemplars, app.resultExemplars)
}
@ -2192,7 +2318,7 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) {
}
sl.run(nil)
require.Equal(t, 0.0, appender.result[0].v, "bad 'up' value")
require.Equal(t, 0.0, appender.resultFloats[0].f, "bad 'up' value")
}
func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
@ -2230,7 +2356,7 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
}
sl.run(nil)
require.Equal(t, 0.0, appender.result[0].v, "bad 'up' value")
require.Equal(t, 0.0, appender.resultFloats[0].f, "bad 'up' value")
}
type errorAppender struct {
@ -2279,14 +2405,14 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T
require.NoError(t, err)
require.NoError(t, slApp.Commit())
want := []sample{
want := []floatSample{
{
metric: labels.FromStrings(model.MetricNameLabel, "normal"),
t: timestamp.FromTime(now),
v: 1,
f: 1,
},
}
require.Equal(t, want, app.result, "Appended samples not as expected:\n%s", appender)
require.Equal(t, want, app.resultFloats, "Appended samples not as expected:\n%s", appender)
require.Equal(t, 4, total)
require.Equal(t, 4, added)
require.Equal(t, 1, seriesAdded)
@ -2598,14 +2724,14 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) {
require.NoError(t, err)
require.NoError(t, slApp.Commit())
want := []sample{
want := []floatSample{
{
metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
t: 0,
v: 1,
f: 1,
},
}
require.Equal(t, want, capp.result, "Appended samples not as expected:\n%s", appender)
require.Equal(t, want, capp.resultFloats, "Appended samples not as expected:\n%s", appender)
}
func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
@ -2640,14 +2766,14 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
require.NoError(t, err)
require.NoError(t, slApp.Commit())
want := []sample{
want := []floatSample{
{
metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
t: timestamp.FromTime(now),
v: 1,
f: 1,
},
}
require.Equal(t, want, capp.result, "Appended samples not as expected:\n%s", appender)
require.Equal(t, want, capp.resultFloats, "Appended samples not as expected:\n%s", appender)
}
func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {

View file

@ -14,6 +14,7 @@
package remote
import (
"compress/gzip"
"errors"
"fmt"
"io"
@ -26,6 +27,7 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
"github.com/prometheus/common/model"
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
"golang.org/x/exp/slices"
"github.com/prometheus/prometheus/model/exemplar"
@ -38,8 +40,13 @@ import (
"github.com/prometheus/prometheus/tsdb/chunks"
)
// decodeReadLimit is the maximum size of a read request body in bytes.
const decodeReadLimit = 32 * 1024 * 1024
const (
// decodeReadLimit is the maximum size of a read request body in bytes.
decodeReadLimit = 32 * 1024 * 1024
pbContentType = "application/x-protobuf"
jsonContentType = "application/json"
)
type HTTPError struct {
msg string
@ -806,3 +813,56 @@ func DecodeWriteRequest(r io.Reader) (*prompb.WriteRequest, error) {
return &req, nil
}
func DecodeOTLPWriteRequest(r *http.Request) (pmetricotlp.ExportRequest, error) {
contentType := r.Header.Get("Content-Type")
var decoderFunc func(buf []byte) (pmetricotlp.ExportRequest, error)
switch contentType {
case pbContentType:
decoderFunc = func(buf []byte) (pmetricotlp.ExportRequest, error) {
req := pmetricotlp.NewExportRequest()
return req, req.UnmarshalProto(buf)
}
case jsonContentType:
decoderFunc = func(buf []byte) (pmetricotlp.ExportRequest, error) {
req := pmetricotlp.NewExportRequest()
return req, req.UnmarshalJSON(buf)
}
default:
return pmetricotlp.NewExportRequest(), fmt.Errorf("unsupported content type: %s, supported: [%s, %s]", contentType, jsonContentType, pbContentType)
}
reader := r.Body
// Handle compression.
switch r.Header.Get("Content-Encoding") {
case "gzip":
gr, err := gzip.NewReader(reader)
if err != nil {
return pmetricotlp.NewExportRequest(), err
}
reader = gr
case "":
// No compression.
default:
return pmetricotlp.NewExportRequest(), fmt.Errorf("unsupported compression: %s. Only \"gzip\" or no compression supported", r.Header.Get("Content-Encoding"))
}
body, err := io.ReadAll(reader)
if err != nil {
r.Body.Close()
return pmetricotlp.NewExportRequest(), err
}
if err = r.Body.Close(); err != nil {
return pmetricotlp.NewExportRequest(), err
}
otlpReq, err := decoderFunc(body)
if err != nil {
return pmetricotlp.NewExportRequest(), err
}
return otlpReq, nil
}
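A rough sketch of how an HTTP handler might sit in front of `DecodeOTLPWriteRequest`; the handler name and status-code choices here are illustrative, not the wiring added elsewhere in this commit:
```go
func handleOTLP(w http.ResponseWriter, r *http.Request) {
	req, err := DecodeOTLPWriteRequest(r)
	if err != nil {
		// Unsupported content types, unsupported encodings, and malformed
		// payloads all surface here as decode errors.
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	md := req.Metrics() // pmetric.Metrics, ready to be translated and appended
	_ = md
	w.WriteHeader(http.StatusOK)
}
```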

View file

@ -0,0 +1,23 @@
## Copying from opentelemetry/opentelemetry-collector-contrib
The files in the `prometheus/` and `prometheusremotewrite/` directories are copied from the OpenTelemetry Project[^1].
This is done instead of adding a go.mod dependency because OpenTelemetry depends on `prometheus/prometheus`, so a dependency in the other direction would create an import cycle. This is just a temporary solution; the long-term solution is to move the required packages from OpenTelemetry into `prometheus/prometheus`.
We don't copy in `./prometheus` through this script because that package imports a collector-specific featuregate package we don't want to import. The featuregate package is currently being removed, and in the future we will copy this folder too.
Updating the dependency is a multi-step process:
1. Vendor the latest `prometheus/prometheus`@`main` into [`opentelemetry/opentelemetry-collector-contrib`](https://github.com/open-telemetry/opentelemetry-collector-contrib)
1. Update the VERSION in `update-copy.sh`.
1. Run `./update-copy.sh`.
### Why copy?
This is because the packages we copy depend on the [`prompb`](https://github.com/prometheus/prometheus/blob/main/prompb) package. While the package is relatively stable, there are still changes. For example, https://github.com/prometheus/prometheus/pull/11935 changed the types.
This means that if we depended on the upstream packages directly, we would never be able to make changes like the one above. Hence we're copying the code for now.
### I need to manually change these files
When we do want to make changes to the types in `prompb`, we might need to edit the files directly. That is OK; please let @gouthamve or @jesusvazquez know so they can take care of updating the upstream code (by vendoring in `prometheus/prometheus` upstream and resolving conflicts) and then run the copy script again to keep things updated.
[^1]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheus and https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheusremotewrite

View file

@ -0,0 +1,41 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package normalize
import (
"strings"
"unicode"
)
// Normalizes the specified label to follow the Prometheus label name standard.
//
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
//
// Labels that start with a non-letter rune will be prefixed with "key_".
//
// An exception is made for double underscores, which are allowed.
func NormalizeLabel(label string) string {
// Trivial case
if len(label) == 0 {
return label
}
// Replace all non-alphanumeric runes with underscores
label = strings.Map(sanitizeRune, label)
// If label starts with a number, prepend with "key_"
if unicode.IsDigit(rune(label[0])) {
label = "key_" + label
}
return label
}
// Return '_' for anything non-alphanumeric
func sanitizeRune(r rune) rune {
if unicode.IsLetter(r) || unicode.IsDigit(r) {
return r
}
return '_'
}

View file

@ -0,0 +1,19 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package normalize
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestSanitizeDropSanitization(t *testing.T) {
require.Equal(t, "", NormalizeLabel(""))
require.Equal(t, "_test", NormalizeLabel("_test"))
require.Equal(t, "key_0test", NormalizeLabel("0test"))
require.Equal(t, "test", NormalizeLabel("test"))
require.Equal(t, "test__", NormalizeLabel("test_/"))
require.Equal(t, "__test", NormalizeLabel("__test"))
}

View file

@ -0,0 +1,251 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package normalize
import (
"strings"
"unicode"
"go.opentelemetry.io/collector/pdata/pmetric"
)
// The map to translate OTLP units to Prometheus units
// OTLP metrics use the c/s notation as specified at https://ucum.org/ucum.html
// (See also https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/README.md#instrument-units)
// Prometheus best practices for units: https://prometheus.io/docs/practices/naming/#base-units
// OpenMetrics specification for units: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#units-and-base-units
var unitMap = map[string]string{
// Time
"d": "days",
"h": "hours",
"min": "minutes",
"s": "seconds",
"ms": "milliseconds",
"us": "microseconds",
"ns": "nanoseconds",
// Bytes
"By": "bytes",
"KiBy": "kibibytes",
"MiBy": "mebibytes",
"GiBy": "gibibytes",
"TiBy": "tibibytes",
"KBy": "kilobytes",
"MBy": "megabytes",
"GBy": "gigabytes",
"TBy": "terabytes",
"B": "bytes",
"KB": "kilobytes",
"MB": "megabytes",
"GB": "gigabytes",
"TB": "terabytes",
// SI
"m": "meters",
"V": "volts",
"A": "amperes",
"J": "joules",
"W": "watts",
"g": "grams",
// Misc
"Cel": "celsius",
"Hz": "hertz",
"1": "",
"%": "percent",
"$": "dollars",
}
// The map that translates the "per" unit
// Example: s => per second (singular)
var perUnitMap = map[string]string{
"s": "second",
"m": "minute",
"h": "hour",
"d": "day",
"w": "week",
"mo": "month",
"y": "year",
}
// BuildPromCompliantName builds a Prometheus-compliant metric name for the specified metric.
//
// The metric name is prefixed with the specified namespace and an underscore, if
// a namespace is given. The namespace is not cleaned up; make sure the specified
// namespace follows the Prometheus naming convention.
//
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
// and https://prometheus.io/docs/practices/naming/#metric-and-label-naming
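//
// Illustrative results (taken from the tests in this diff): a monotonic Sum named
// "system.io" with unit "By" becomes "system_io_bytes_total", and a Gauge named
// "system.cpu.utilization" with unit "1" becomes "system_cpu_utilization_ratio".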
func BuildPromCompliantName(metric pmetric.Metric, namespace string) string {
// Split metric name in "tokens" (remove all non-alphanumeric)
nameTokens := strings.FieldsFunc(
metric.Name(),
func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) },
)
// Split unit at the '/' if any
unitTokens := strings.SplitN(metric.Unit(), "/", 2)
// Main unit
// Append if not blank, doesn't contain '{}', and is not present in metric name already
if len(unitTokens) > 0 {
mainUnitOtel := strings.TrimSpace(unitTokens[0])
if mainUnitOtel != "" && !strings.ContainsAny(mainUnitOtel, "{}") {
mainUnitProm := CleanUpString(unitMapGetOrDefault(mainUnitOtel))
if mainUnitProm != "" && !contains(nameTokens, mainUnitProm) {
nameTokens = append(nameTokens, mainUnitProm)
}
}
// Per unit
// Append if not blank, doesn't contain '{}', and is not present in metric name already
if len(unitTokens) > 1 && unitTokens[1] != "" {
perUnitOtel := strings.TrimSpace(unitTokens[1])
if perUnitOtel != "" && !strings.ContainsAny(perUnitOtel, "{}") {
perUnitProm := CleanUpString(perUnitMapGetOrDefault(perUnitOtel))
if perUnitProm != "" && !contains(nameTokens, perUnitProm) {
nameTokens = append(append(nameTokens, "per"), perUnitProm)
}
}
}
}
// Append _total for Counters
if metric.Type() == pmetric.MetricTypeSum && metric.Sum().IsMonotonic() {
nameTokens = append(removeItem(nameTokens, "total"), "total")
}
// Append _ratio for metrics with unit "1"
// Some Otel receivers improperly use unit "1" for counters of objects
// See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions
// Until these issues have been fixed, we're appending `_ratio` for gauges ONLY
// Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons)
if metric.Unit() == "1" && metric.Type() == pmetric.MetricTypeGauge {
nameTokens = append(removeItem(nameTokens, "ratio"), "ratio")
}
// Namespace?
if namespace != "" {
nameTokens = append([]string{namespace}, nameTokens...)
}
// Build the string from the tokens, separated with underscores
normalizedName := strings.Join(nameTokens, "_")
// Metric name cannot start with a digit, so prefix it with "_" in this case
if normalizedName != "" && unicode.IsDigit(rune(normalizedName[0])) {
normalizedName = "_" + normalizedName
}
return normalizedName
}
// TrimPromSuffixes trims type and unit Prometheus suffixes from a metric name,
// following the [OpenTelemetry specs] for converting Prometheus metric points to OTLP.
//
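// Illustrative result (taken from the tests in this diff): trimming
// "apache_requests_total" with metric type Sum and unit "1" yields "apache_requests".
//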
// [OpenTelemetry specs]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#metric-metadata
func TrimPromSuffixes(promName string, metricType pmetric.MetricType, unit string) string {
nameTokens := strings.Split(promName, "_")
if len(nameTokens) == 1 {
return promName
}
nameTokens = removeTypeSuffixes(nameTokens, metricType)
nameTokens = removeUnitSuffixes(nameTokens, unit)
return strings.Join(nameTokens, "_")
}
func removeTypeSuffixes(tokens []string, metricType pmetric.MetricType) []string {
switch metricType {
case pmetric.MetricTypeSum:
// Only counters are expected to have a type suffix at this point.
// For other types, suffixes are removed during scrape.
return removeSuffix(tokens, "total")
default:
return tokens
}
}
func removeUnitSuffixes(nameTokens []string, unit string) []string {
l := len(nameTokens)
unitTokens := strings.Split(unit, "_")
lu := len(unitTokens)
if lu == 0 || l <= lu {
return nameTokens
}
suffixed := true
for i := range unitTokens {
if nameTokens[l-i-1] != unitTokens[lu-i-1] {
suffixed = false
break
}
}
if suffixed {
return nameTokens[:l-lu]
}
return nameTokens
}
func removeSuffix(tokens []string, suffix string) []string {
l := len(tokens)
if tokens[l-1] == suffix {
return tokens[:l-1]
}
return tokens
}
// Clean up specified string so it's Prometheus compliant
func CleanUpString(s string) string {
return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) }), "_")
}
func RemovePromForbiddenRunes(s string) string {
return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != '_' && r != ':' }), "_")
}
// Retrieve the Prometheus "basic" unit corresponding to the specified "basic" unit
// Returns the specified unit if not found in unitMap
func unitMapGetOrDefault(unit string) string {
if promUnit, ok := unitMap[unit]; ok {
return promUnit
}
return unit
}
// Retrieve the Prometheus "per" unit corresponding to the specified "per" unit
// Returns the specified unit if not found in perUnitMap
func perUnitMapGetOrDefault(perUnit string) string {
if promPerUnit, ok := perUnitMap[perUnit]; ok {
return promPerUnit
}
return perUnit
}
// Returns whether the slice contains the specified value
func contains(slice []string, value string) bool {
for _, sliceEntry := range slice {
if sliceEntry == value {
return true
}
}
return false
}
// Remove the specified value from the slice
func removeItem(slice []string, value string) []string {
newSlice := make([]string, 0, len(slice))
for _, sliceEntry := range slice {
if sliceEntry != value {
newSlice = append(newSlice, sliceEntry)
}
}
return newSlice
}

View file

@ -0,0 +1,180 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package normalize
import (
"testing"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/pdata/pmetric"
)
func TestByte(t *testing.T) {
require.Equal(t, "system_filesystem_usage_bytes", BuildPromCompliantName(createGauge("system.filesystem.usage", "By"), ""))
}
func TestByteCounter(t *testing.T) {
require.Equal(t, "system_io_bytes_total", BuildPromCompliantName(createCounter("system.io", "By"), ""))
require.Equal(t, "network_transmitted_bytes_total", BuildPromCompliantName(createCounter("network_transmitted_bytes_total", "By"), ""))
}
func TestWhiteSpaces(t *testing.T) {
require.Equal(t, "system_filesystem_usage_bytes", BuildPromCompliantName(createGauge("\t system.filesystem.usage ", " By\t"), ""))
}
func TestNonStandardUnit(t *testing.T) {
require.Equal(t, "system_network_dropped", BuildPromCompliantName(createGauge("system.network.dropped", "{packets}"), ""))
}
func TestNonStandardUnitCounter(t *testing.T) {
require.Equal(t, "system_network_dropped_total", BuildPromCompliantName(createCounter("system.network.dropped", "{packets}"), ""))
}
func TestBrokenUnit(t *testing.T) {
require.Equal(t, "system_network_dropped_packets", BuildPromCompliantName(createGauge("system.network.dropped", "packets"), ""))
require.Equal(t, "system_network_packets_dropped", BuildPromCompliantName(createGauge("system.network.packets.dropped", "packets"), ""))
require.Equal(t, "system_network_packets", BuildPromCompliantName(createGauge("system.network.packets", "packets"), ""))
}
func TestBrokenUnitCounter(t *testing.T) {
require.Equal(t, "system_network_dropped_packets_total", BuildPromCompliantName(createCounter("system.network.dropped", "packets"), ""))
require.Equal(t, "system_network_packets_dropped_total", BuildPromCompliantName(createCounter("system.network.packets.dropped", "packets"), ""))
require.Equal(t, "system_network_packets_total", BuildPromCompliantName(createCounter("system.network.packets", "packets"), ""))
}
func TestRatio(t *testing.T) {
require.Equal(t, "hw_gpu_memory_utilization_ratio", BuildPromCompliantName(createGauge("hw.gpu.memory.utilization", "1"), ""))
require.Equal(t, "hw_fan_speed_ratio", BuildPromCompliantName(createGauge("hw.fan.speed_ratio", "1"), ""))
require.Equal(t, "objects_total", BuildPromCompliantName(createCounter("objects", "1"), ""))
}
func TestHertz(t *testing.T) {
require.Equal(t, "hw_cpu_speed_limit_hertz", BuildPromCompliantName(createGauge("hw.cpu.speed_limit", "Hz"), ""))
}
func TestPer(t *testing.T) {
require.Equal(t, "broken_metric_speed_km_per_hour", BuildPromCompliantName(createGauge("broken.metric.speed", "km/h"), ""))
require.Equal(t, "astro_light_speed_limit_meters_per_second", BuildPromCompliantName(createGauge("astro.light.speed_limit", "m/s"), ""))
}
func TestPercent(t *testing.T) {
require.Equal(t, "broken_metric_success_ratio_percent", BuildPromCompliantName(createGauge("broken.metric.success_ratio", "%"), ""))
require.Equal(t, "broken_metric_success_percent", BuildPromCompliantName(createGauge("broken.metric.success_percent", "%"), ""))
}
func TestDollar(t *testing.T) {
require.Equal(t, "crypto_bitcoin_value_dollars", BuildPromCompliantName(createGauge("crypto.bitcoin.value", "$"), ""))
require.Equal(t, "crypto_bitcoin_value_dollars", BuildPromCompliantName(createGauge("crypto.bitcoin.value.dollars", "$"), ""))
}
func TestEmpty(t *testing.T) {
require.Equal(t, "test_metric_no_unit", BuildPromCompliantName(createGauge("test.metric.no_unit", ""), ""))
require.Equal(t, "test_metric_spaces", BuildPromCompliantName(createGauge("test.metric.spaces", " \t "), ""))
}
func TestUnsupportedRunes(t *testing.T) {
require.Equal(t, "unsupported_metric_temperature_F", BuildPromCompliantName(createGauge("unsupported.metric.temperature", "°F"), ""))
require.Equal(t, "unsupported_metric_weird", BuildPromCompliantName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), ""))
require.Equal(t, "unsupported_metric_redundant_test_per_C", BuildPromCompliantName(createGauge("unsupported.metric.redundant", "__test $/°C"), ""))
}
func TestOtelReceivers(t *testing.T) {
require.Equal(t, "active_directory_ds_replication_network_io_bytes_total", BuildPromCompliantName(createCounter("active_directory.ds.replication.network.io", "By"), ""))
require.Equal(t, "active_directory_ds_replication_sync_object_pending_total", BuildPromCompliantName(createCounter("active_directory.ds.replication.sync.object.pending", "{objects}"), ""))
require.Equal(t, "active_directory_ds_replication_object_rate_per_second", BuildPromCompliantName(createGauge("active_directory.ds.replication.object.rate", "{objects}/s"), ""))
require.Equal(t, "active_directory_ds_name_cache_hit_rate_percent", BuildPromCompliantName(createGauge("active_directory.ds.name_cache.hit_rate", "%"), ""))
require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time_milliseconds", BuildPromCompliantName(createGauge("active_directory.ds.ldap.bind.last_successful.time", "ms"), ""))
require.Equal(t, "apache_current_connections", BuildPromCompliantName(createGauge("apache.current_connections", "connections"), ""))
require.Equal(t, "apache_workers_connections", BuildPromCompliantName(createGauge("apache.workers", "connections"), ""))
require.Equal(t, "apache_requests_total", BuildPromCompliantName(createCounter("apache.requests", "1"), ""))
require.Equal(t, "bigip_virtual_server_request_count_total", BuildPromCompliantName(createCounter("bigip.virtual_server.request.count", "{requests}"), ""))
require.Equal(t, "system_cpu_utilization_ratio", BuildPromCompliantName(createGauge("system.cpu.utilization", "1"), ""))
require.Equal(t, "system_disk_operation_time_seconds_total", BuildPromCompliantName(createCounter("system.disk.operation_time", "s"), ""))
require.Equal(t, "system_cpu_load_average_15m_ratio", BuildPromCompliantName(createGauge("system.cpu.load_average.15m", "1"), ""))
require.Equal(t, "memcached_operation_hit_ratio_percent", BuildPromCompliantName(createGauge("memcached.operation_hit_ratio", "%"), ""))
require.Equal(t, "mongodbatlas_process_asserts_per_second", BuildPromCompliantName(createGauge("mongodbatlas.process.asserts", "{assertions}/s"), ""))
require.Equal(t, "mongodbatlas_process_journaling_data_files_mebibytes", BuildPromCompliantName(createGauge("mongodbatlas.process.journaling.data_files", "MiBy"), ""))
require.Equal(t, "mongodbatlas_process_network_io_bytes_per_second", BuildPromCompliantName(createGauge("mongodbatlas.process.network.io", "By/s"), ""))
require.Equal(t, "mongodbatlas_process_oplog_rate_gibibytes_per_hour", BuildPromCompliantName(createGauge("mongodbatlas.process.oplog.rate", "GiBy/h"), ""))
require.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", BuildPromCompliantName(createGauge("mongodbatlas.process.db.query_targeting.scanned_per_returned", "{scanned}/{returned}"), ""))
require.Equal(t, "nginx_requests", BuildPromCompliantName(createGauge("nginx.requests", "requests"), ""))
require.Equal(t, "nginx_connections_accepted", BuildPromCompliantName(createGauge("nginx.connections_accepted", "connections"), ""))
require.Equal(t, "nsxt_node_memory_usage_kilobytes", BuildPromCompliantName(createGauge("nsxt.node.memory.usage", "KBy"), ""))
require.Equal(t, "redis_latest_fork_microseconds", BuildPromCompliantName(createGauge("redis.latest_fork", "us"), ""))
}
func TestTrimPromSuffixes(t *testing.T) {
require.Equal(t, "active_directory_ds_replication_network_io", TrimPromSuffixes("active_directory_ds_replication_network_io_bytes_total", pmetric.MetricTypeSum, "bytes"))
require.Equal(t, "active_directory_ds_name_cache_hit_rate", TrimPromSuffixes("active_directory_ds_name_cache_hit_rate_percent", pmetric.MetricTypeGauge, "percent"))
require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time", TrimPromSuffixes("active_directory_ds_ldap_bind_last_successful_time_milliseconds", pmetric.MetricTypeGauge, "milliseconds"))
require.Equal(t, "apache_requests", TrimPromSuffixes("apache_requests_total", pmetric.MetricTypeSum, "1"))
require.Equal(t, "system_cpu_utilization", TrimPromSuffixes("system_cpu_utilization_ratio", pmetric.MetricTypeGauge, "ratio"))
require.Equal(t, "mongodbatlas_process_journaling_data_files", TrimPromSuffixes("mongodbatlas_process_journaling_data_files_mebibytes", pmetric.MetricTypeGauge, "mebibytes"))
require.Equal(t, "mongodbatlas_process_network_io", TrimPromSuffixes("mongodbatlas_process_network_io_bytes_per_second", pmetric.MetricTypeGauge, "bytes_per_second"))
require.Equal(t, "mongodbatlas_process_oplog_rate", TrimPromSuffixes("mongodbatlas_process_oplog_rate_gibibytes_per_hour", pmetric.MetricTypeGauge, "gibibytes_per_hour"))
require.Equal(t, "nsxt_node_memory_usage", TrimPromSuffixes("nsxt_node_memory_usage_kilobytes", pmetric.MetricTypeGauge, "kilobytes"))
require.Equal(t, "redis_latest_fork", TrimPromSuffixes("redis_latest_fork_microseconds", pmetric.MetricTypeGauge, "microseconds"))
require.Equal(t, "up", TrimPromSuffixes("up", pmetric.MetricTypeGauge, ""))
// These are not necessarily valid OM units, only tested for the sake of completeness.
require.Equal(t, "active_directory_ds_replication_sync_object_pending", TrimPromSuffixes("active_directory_ds_replication_sync_object_pending_total", pmetric.MetricTypeSum, "{objects}"))
require.Equal(t, "apache_current", TrimPromSuffixes("apache_current_connections", pmetric.MetricTypeGauge, "connections"))
require.Equal(t, "bigip_virtual_server_request_count", TrimPromSuffixes("bigip_virtual_server_request_count_total", pmetric.MetricTypeSum, "{requests}"))
require.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", TrimPromSuffixes("mongodbatlas_process_db_query_targeting_scanned_per_returned", pmetric.MetricTypeGauge, "{scanned}/{returned}"))
require.Equal(t, "nginx_connections_accepted", TrimPromSuffixes("nginx_connections_accepted", pmetric.MetricTypeGauge, "connections"))
require.Equal(t, "apache_workers", TrimPromSuffixes("apache_workers_connections", pmetric.MetricTypeGauge, "connections"))
require.Equal(t, "nginx", TrimPromSuffixes("nginx_requests", pmetric.MetricTypeGauge, "requests"))
// Units shouldn't be trimmed if the unit is not a direct match with the suffix, i.e., a suffix "_seconds" shouldn't be removed if the unit is "sec" or "s"
require.Equal(t, "system_cpu_load_average_15m_ratio", TrimPromSuffixes("system_cpu_load_average_15m_ratio", pmetric.MetricTypeGauge, "1"))
require.Equal(t, "mongodbatlas_process_asserts_per_second", TrimPromSuffixes("mongodbatlas_process_asserts_per_second", pmetric.MetricTypeGauge, "{assertions}/s"))
require.Equal(t, "memcached_operation_hit_ratio_percent", TrimPromSuffixes("memcached_operation_hit_ratio_percent", pmetric.MetricTypeGauge, "%"))
require.Equal(t, "active_directory_ds_replication_object_rate_per_second", TrimPromSuffixes("active_directory_ds_replication_object_rate_per_second", pmetric.MetricTypeGauge, "{objects}/s"))
require.Equal(t, "system_disk_operation_time_seconds", TrimPromSuffixes("system_disk_operation_time_seconds_total", pmetric.MetricTypeSum, "s"))
}
func TestNamespace(t *testing.T) {
require.Equal(t, "space_test", BuildPromCompliantName(createGauge("test", ""), "space"))
require.Equal(t, "space_test", BuildPromCompliantName(createGauge("#test", ""), "space"))
}
func TestCleanUpString(t *testing.T) {
require.Equal(t, "", CleanUpString(""))
require.Equal(t, "a_b", CleanUpString("a b"))
require.Equal(t, "hello_world", CleanUpString("hello, world!"))
require.Equal(t, "hello_you_2", CleanUpString("hello you 2"))
require.Equal(t, "1000", CleanUpString("$1000"))
require.Equal(t, "", CleanUpString("*+$^=)"))
}
func TestUnitMapGetOrDefault(t *testing.T) {
require.Equal(t, "", unitMapGetOrDefault(""))
require.Equal(t, "seconds", unitMapGetOrDefault("s"))
require.Equal(t, "invalid", unitMapGetOrDefault("invalid"))
}
func TestPerUnitMapGetOrDefault(t *testing.T) {
require.Equal(t, "", perUnitMapGetOrDefault(""))
require.Equal(t, "second", perUnitMapGetOrDefault("s"))
require.Equal(t, "invalid", perUnitMapGetOrDefault("invalid"))
}
func TestRemoveItem(t *testing.T) {
require.Equal(t, []string{}, removeItem([]string{}, "test"))
require.Equal(t, []string{}, removeItem([]string{}, ""))
require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, "d"))
require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, ""))
require.Equal(t, []string{"a", "b"}, removeItem([]string{"a", "b", "c"}, "c"))
require.Equal(t, []string{"a", "c"}, removeItem([]string{"a", "b", "c"}, "b"))
require.Equal(t, []string{"b", "c"}, removeItem([]string{"a", "b", "c"}, "a"))
}
func TestBuildPromCompliantName(t *testing.T) {
require.Equal(t, "system_io_bytes_total", BuildPromCompliantName(createCounter("system.io", "By"), ""))
require.Equal(t, "system_network_io_bytes_total", BuildPromCompliantName(createCounter("network.io", "By"), "system"))
require.Equal(t, "_3_14_digits", BuildPromCompliantName(createGauge("3.14 digits", ""), ""))
require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildPromCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), ""))
require.Equal(t, "foo_bar", BuildPromCompliantName(createGauge(":foo::bar", ""), ""))
require.Equal(t, "foo_bar_total", BuildPromCompliantName(createCounter(":foo::bar", ""), ""))
}

View file

@ -0,0 +1,34 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package normalize
import (
"go.opentelemetry.io/collector/pdata/pmetric"
)
var ilm pmetric.ScopeMetrics
func init() {
metrics := pmetric.NewMetrics()
resourceMetrics := metrics.ResourceMetrics().AppendEmpty()
ilm = resourceMetrics.ScopeMetrics().AppendEmpty()
}
// Returns a new Metric of type "Gauge" with specified name and unit
func createGauge(name, unit string) pmetric.Metric {
gauge := ilm.Metrics().AppendEmpty()
gauge.SetName(name)
gauge.SetUnit(unit)
gauge.SetEmptyGauge()
return gauge
}
// Returns a new Metric of type Monotonic Sum with specified name and unit
func createCounter(name, unit string) pmetric.Metric {
counter := ilm.Metrics().AppendEmpty()
counter.SetEmptySum().SetIsMonotonic(true)
counter.SetName(name)
counter.SetUnit(unit)
return counter
}

View file

@ -0,0 +1,559 @@
// DO NOT EDIT. COPIED AS-IS. SEE README.md
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package prometheusremotewrite // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
import (
"encoding/hex"
"fmt"
"log"
"math"
"sort"
"strconv"
"strings"
"time"
"unicode/utf8"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/prompb"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
conventions "go.opentelemetry.io/collector/semconv/v1.6.1"
prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
)
const (
nameStr = "__name__"
sumStr = "_sum"
countStr = "_count"
bucketStr = "_bucket"
leStr = "le"
quantileStr = "quantile"
pInfStr = "+Inf"
createdSuffix = "_created"
// maxExemplarRunes is the maximum number of UTF-8 exemplar characters
// according to the OpenMetrics specification
// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars
maxExemplarRunes = 128
// Trace and Span id keys are defined as part of the spec:
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification%2Fmetrics%2Fdatamodel.md#exemplars-2
traceIDKey = "trace_id"
spanIDKey = "span_id"
infoType = "info"
targetMetricName = "target_info"
)
type bucketBoundsData struct {
sig string
bound float64
}
// byBucketBoundsData enables the usage of sort.Sort() with a slice of bucket bounds
type byBucketBoundsData []bucketBoundsData
func (m byBucketBoundsData) Len() int { return len(m) }
func (m byBucketBoundsData) Less(i, j int) bool { return m[i].bound < m[j].bound }
func (m byBucketBoundsData) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
// ByLabelName enables the usage of sort.Sort() with a slice of labels
type ByLabelName []prompb.Label
func (a ByLabelName) Len() int { return len(a) }
func (a ByLabelName) Less(i, j int) bool { return a[i].Name < a[j].Name }
func (a ByLabelName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// addSample finds a TimeSeries in tsMap that corresponds to the label set labels, and adds sample to that TimeSeries; it
// creates a new TimeSeries in the map if one is not found and returns the time series signature.
// tsMap is left unmodified if sample, labels, or tsMap itself is nil.
func addSample(tsMap map[string]*prompb.TimeSeries, sample *prompb.Sample, labels []prompb.Label,
datatype string) string {
if sample == nil || labels == nil || tsMap == nil {
return ""
}
sig := timeSeriesSignature(datatype, &labels)
ts, ok := tsMap[sig]
if ok {
ts.Samples = append(ts.Samples, *sample)
} else {
newTs := &prompb.TimeSeries{
Labels: labels,
Samples: []prompb.Sample{*sample},
}
tsMap[sig] = newTs
}
return sig
}
// addExemplars finds the bucket bound that corresponds to each exemplar's value and adds the exemplar to the series with
// the matching signature; exemplars are only added if the series already has samples.
// tsMap is left unmodified if any of the arguments is nil.
func addExemplars(tsMap map[string]*prompb.TimeSeries, exemplars []prompb.Exemplar, bucketBoundsData []bucketBoundsData) {
if tsMap == nil || bucketBoundsData == nil || exemplars == nil {
return
}
sort.Sort(byBucketBoundsData(bucketBoundsData))
for _, exemplar := range exemplars {
addExemplar(tsMap, bucketBoundsData, exemplar)
}
}
func addExemplar(tsMap map[string]*prompb.TimeSeries, bucketBounds []bucketBoundsData, exemplar prompb.Exemplar) {
for _, bucketBound := range bucketBounds {
sig := bucketBound.sig
bound := bucketBound.bound
_, ok := tsMap[sig]
if ok {
if tsMap[sig].Samples != nil {
if exemplar.Value <= bound {
tsMap[sig].Exemplars = append(tsMap[sig].Exemplars, exemplar)
return
}
}
}
}
}
// timeSeriesSignature returns a string signature in the form of:
//
// TYPE-label1-value1- ... -labelN-valueN
//
// the label slice should not contain duplicate label names; this method sorts the slice by label name before creating
// the signature.
func timeSeriesSignature(datatype string, labels *[]prompb.Label) string {
b := strings.Builder{}
b.WriteString(datatype)
sort.Sort(ByLabelName(*labels))
for _, lb := range *labels {
b.WriteString("-")
b.WriteString(lb.GetName())
b.WriteString("-")
b.WriteString(lb.GetValue())
}
return b.String()
}
// createAttributes creates a slice of prompb.Label from the OTLP attributes and
// pairs of string values. An unpaired string value is ignored. String pairs
// overwrite OTLP labels if a collision happens, and the overwrite is logged.
// Resulting label names are sanitized.
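// The variadic extras are interpreted as label name/value pairs; callers in this
// package pass reserved names such as the metric name label together with their values.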
func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externalLabels map[string]string, extras ...string) []prompb.Label {
// map ensures no duplicate label name
l := map[string]prompb.Label{}
// Ensure attributes are sorted by key for consistent merging of keys which
// collide when sanitized.
labels := make([]prompb.Label, 0, attributes.Len())
attributes.Range(func(key string, value pcommon.Value) bool {
labels = append(labels, prompb.Label{Name: key, Value: value.AsString()})
return true
})
sort.Stable(ByLabelName(labels))
for _, label := range labels {
var finalKey = prometheustranslator.NormalizeLabel(label.Name)
if existingLabel, alreadyExists := l[finalKey]; alreadyExists {
existingLabel.Value = existingLabel.Value + ";" + label.Value
l[finalKey] = existingLabel
} else {
l[finalKey] = prompb.Label{
Name: finalKey,
Value: label.Value,
}
}
}
// Map service.name + service.namespace to job
if serviceName, ok := resource.Attributes().Get(conventions.AttributeServiceName); ok {
val := serviceName.AsString()
if serviceNamespace, ok := resource.Attributes().Get(conventions.AttributeServiceNamespace); ok {
val = fmt.Sprintf("%s/%s", serviceNamespace.AsString(), val)
}
l[model.JobLabel] = prompb.Label{
Name: model.JobLabel,
Value: val,
}
}
// Map service.instance.id to instance
if instance, ok := resource.Attributes().Get(conventions.AttributeServiceInstanceID); ok {
l[model.InstanceLabel] = prompb.Label{
Name: model.InstanceLabel,
Value: instance.AsString(),
}
}
for key, value := range externalLabels {
// External labels have already been sanitized
if _, alreadyExists := l[key]; alreadyExists {
// Skip external labels if they are overridden by metric attributes
continue
}
l[key] = prompb.Label{
Name: key,
Value: value,
}
}
for i := 0; i < len(extras); i += 2 {
if i+1 >= len(extras) {
break
}
_, found := l[extras[i]]
if found {
log.Println("label " + extras[i] + " is overwritten. Check if Prometheus reserved labels are used.")
}
// internal labels should be maintained
name := extras[i]
if !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") {
name = prometheustranslator.NormalizeLabel(name)
}
l[name] = prompb.Label{
Name: name,
Value: extras[i+1],
}
}
s := make([]prompb.Label, 0, len(l))
for _, lb := range l {
s = append(s, lb)
}
return s
}
// isValidAggregationTemporality checks whether an OTel metric has a valid
// aggregation temporality for conversion to a Prometheus metric.
func isValidAggregationTemporality(metric pmetric.Metric) bool {
switch metric.Type() {
case pmetric.MetricTypeGauge, pmetric.MetricTypeSummary:
return true
case pmetric.MetricTypeSum:
return metric.Sum().AggregationTemporality() == pmetric.AggregationTemporalityCumulative
case pmetric.MetricTypeHistogram:
return metric.Histogram().AggregationTemporality() == pmetric.AggregationTemporalityCumulative
case pmetric.MetricTypeExponentialHistogram:
return metric.ExponentialHistogram().AggregationTemporality() == pmetric.AggregationTemporalityCumulative
}
return false
}
// addSingleHistogramDataPoint converts pt to 2 + min(len(ExplicitBounds), len(BucketCounts)) + 1 samples. It
// ignores extra buckets if len(ExplicitBounds) > len(BucketCounts).
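// For example (matching the tests in this diff): 6 explicit bounds with 6 bucket
// counts produce 7 _bucket samples (including the +Inf bucket) plus a _count
// sample and, when the sum is set, a _sum sample.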
func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings, tsMap map[string]*prompb.TimeSeries) {
timestamp := convertTimeStamp(pt.Timestamp())
// the sum, count, and buckets of the histogram get a suffix appended to baseName
baseName := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace)
// If the sum is unset, it indicates the _sum metric point should be
// omitted
if pt.HasSum() {
// treat sum as a sample in an individual TimeSeries
sum := &prompb.Sample{
Value: pt.Sum(),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
sum.Value = math.Float64frombits(value.StaleNaN)
}
sumlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+sumStr)
addSample(tsMap, sum, sumlabels, metric.Type().String())
}
// treat count as a sample in an individual TimeSeries
count := &prompb.Sample{
Value: float64(pt.Count()),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
count.Value = math.Float64frombits(value.StaleNaN)
}
countlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+countStr)
addSample(tsMap, count, countlabels, metric.Type().String())
// cumulative count for conversion to cumulative histogram
var cumulativeCount uint64
promExemplars := getPromExemplars[pmetric.HistogramDataPoint](pt)
var bucketBounds []bucketBoundsData
// process each bound, based on histograms proto definition, # of buckets = # of explicit bounds + 1
for i := 0; i < pt.ExplicitBounds().Len() && i < pt.BucketCounts().Len(); i++ {
bound := pt.ExplicitBounds().At(i)
cumulativeCount += pt.BucketCounts().At(i)
bucket := &prompb.Sample{
Value: float64(cumulativeCount),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
bucket.Value = math.Float64frombits(value.StaleNaN)
}
boundStr := strconv.FormatFloat(bound, 'f', -1, 64)
labels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+bucketStr, leStr, boundStr)
sig := addSample(tsMap, bucket, labels, metric.Type().String())
bucketBounds = append(bucketBounds, bucketBoundsData{sig: sig, bound: bound})
}
// add le=+Inf bucket
infBucket := &prompb.Sample{
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
infBucket.Value = math.Float64frombits(value.StaleNaN)
} else {
infBucket.Value = float64(pt.Count())
}
infLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+bucketStr, leStr, pInfStr)
sig := addSample(tsMap, infBucket, infLabels, metric.Type().String())
bucketBounds = append(bucketBounds, bucketBoundsData{sig: sig, bound: math.Inf(1)})
addExemplars(tsMap, promExemplars, bucketBounds)
// add _created time series if needed
startTimestamp := pt.StartTimestamp()
if settings.ExportCreatedMetric && startTimestamp != 0 {
createdLabels := createAttributes(
resource,
pt.Attributes(),
settings.ExternalLabels,
nameStr,
baseName+createdSuffix,
)
addCreatedTimeSeriesIfNeeded(tsMap, createdLabels, startTimestamp, metric.Type().String())
}
}
type exemplarType interface {
pmetric.ExponentialHistogramDataPoint | pmetric.HistogramDataPoint | pmetric.NumberDataPoint
Exemplars() pmetric.ExemplarSlice
}
func getPromExemplars[T exemplarType](pt T) []prompb.Exemplar {
var promExemplars []prompb.Exemplar
for i := 0; i < pt.Exemplars().Len(); i++ {
exemplar := pt.Exemplars().At(i)
exemplarRunes := 0
promExemplar := &prompb.Exemplar{
Value: exemplar.DoubleValue(),
Timestamp: timestamp.FromTime(exemplar.Timestamp().AsTime()),
}
if traceID := exemplar.TraceID(); !traceID.IsEmpty() {
val := hex.EncodeToString(traceID[:])
exemplarRunes += utf8.RuneCountInString(traceIDKey) + utf8.RuneCountInString(val)
promLabel := prompb.Label{
Name: traceIDKey,
Value: val,
}
promExemplar.Labels = append(promExemplar.Labels, promLabel)
}
if spanID := exemplar.SpanID(); !spanID.IsEmpty() {
val := hex.EncodeToString(spanID[:])
exemplarRunes += utf8.RuneCountInString(spanIDKey) + utf8.RuneCountInString(val)
promLabel := prompb.Label{
Name: spanIDKey,
Value: val,
}
promExemplar.Labels = append(promExemplar.Labels, promLabel)
}
var labelsFromAttributes []prompb.Label
exemplar.FilteredAttributes().Range(func(key string, value pcommon.Value) bool {
val := value.AsString()
exemplarRunes += utf8.RuneCountInString(key) + utf8.RuneCountInString(val)
promLabel := prompb.Label{
Name: key,
Value: val,
}
labelsFromAttributes = append(labelsFromAttributes, promLabel)
return true
})
if exemplarRunes <= maxExemplarRunes {
// only append filtered attributes if it does not cause exemplar
// labels to exceed the max number of runes
promExemplar.Labels = append(promExemplar.Labels, labelsFromAttributes...)
}
promExemplars = append(promExemplars, *promExemplar)
}
return promExemplars
}
// mostRecentTimestampInMetric returns the latest timestamp across all data points of the given metric
func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp {
var ts pcommon.Timestamp
// handle individual metric based on type
switch metric.Type() {
case pmetric.MetricTypeGauge:
dataPoints := metric.Gauge().DataPoints()
for x := 0; x < dataPoints.Len(); x++ {
ts = maxTimestamp(ts, dataPoints.At(x).Timestamp())
}
case pmetric.MetricTypeSum:
dataPoints := metric.Sum().DataPoints()
for x := 0; x < dataPoints.Len(); x++ {
ts = maxTimestamp(ts, dataPoints.At(x).Timestamp())
}
case pmetric.MetricTypeHistogram:
dataPoints := metric.Histogram().DataPoints()
for x := 0; x < dataPoints.Len(); x++ {
ts = maxTimestamp(ts, dataPoints.At(x).Timestamp())
}
case pmetric.MetricTypeExponentialHistogram:
dataPoints := metric.ExponentialHistogram().DataPoints()
for x := 0; x < dataPoints.Len(); x++ {
ts = maxTimestamp(ts, dataPoints.At(x).Timestamp())
}
case pmetric.MetricTypeSummary:
dataPoints := metric.Summary().DataPoints()
for x := 0; x < dataPoints.Len(); x++ {
ts = maxTimestamp(ts, dataPoints.At(x).Timestamp())
}
}
return ts
}
func maxTimestamp(a, b pcommon.Timestamp) pcommon.Timestamp {
if a > b {
return a
}
return b
}
// addSingleSummaryDataPoint converts pt to len(QuantileValues) + 2 samples.
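// For example, a summary data point with two quantile values yields two
// {quantile="..."} samples plus the _sum and _count samples.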
func addSingleSummaryDataPoint(pt pmetric.SummaryDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings,
tsMap map[string]*prompb.TimeSeries) {
timestamp := convertTimeStamp(pt.Timestamp())
// the sum and count of the summary get a suffix appended to baseName
baseName := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace)
// treat sum as a sample in an individual TimeSeries
sum := &prompb.Sample{
Value: pt.Sum(),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
sum.Value = math.Float64frombits(value.StaleNaN)
}
sumlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+sumStr)
addSample(tsMap, sum, sumlabels, metric.Type().String())
// treat count as a sample in an individual TimeSeries
count := &prompb.Sample{
Value: float64(pt.Count()),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
count.Value = math.Float64frombits(value.StaleNaN)
}
countlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+countStr)
addSample(tsMap, count, countlabels, metric.Type().String())
// process each percentile/quantile
for i := 0; i < pt.QuantileValues().Len(); i++ {
qt := pt.QuantileValues().At(i)
quantile := &prompb.Sample{
Value: qt.Value(),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
quantile.Value = math.Float64frombits(value.StaleNaN)
}
percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64)
qtlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName, quantileStr, percentileStr)
addSample(tsMap, quantile, qtlabels, metric.Type().String())
}
// add _created time series if needed
startTimestamp := pt.StartTimestamp()
if settings.ExportCreatedMetric && startTimestamp != 0 {
createdLabels := createAttributes(
resource,
pt.Attributes(),
settings.ExternalLabels,
nameStr,
baseName+createdSuffix,
)
addCreatedTimeSeriesIfNeeded(tsMap, createdLabels, startTimestamp, metric.Type().String())
}
}
// addCreatedTimeSeriesIfNeeded adds {name}_created time series with a single
// sample. If the series exists, then new samples won't be added.
func addCreatedTimeSeriesIfNeeded(
series map[string]*prompb.TimeSeries,
labels []prompb.Label,
startTimestamp pcommon.Timestamp,
metricType string,
) {
sig := timeSeriesSignature(metricType, &labels)
if _, ok := series[sig]; !ok {
series[sig] = &prompb.TimeSeries{
Labels: labels,
Samples: []prompb.Sample{
{ // convert ns to ms
Value: float64(convertTimeStamp(startTimestamp)),
},
},
}
}
}
// addResourceTargetInfo converts the resource to the target info metric
func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timestamp pcommon.Timestamp, tsMap map[string]*prompb.TimeSeries) {
if settings.DisableTargetInfo {
return
}
// Use resource attributes (other than those used for job+instance) as the
// metric labels for the target info metric
attributes := pcommon.NewMap()
resource.Attributes().CopyTo(attributes)
attributes.RemoveIf(func(k string, _ pcommon.Value) bool {
switch k {
case conventions.AttributeServiceName, conventions.AttributeServiceNamespace, conventions.AttributeServiceInstanceID:
// Remove resource attributes used for job + instance
return true
default:
return false
}
})
if attributes.Len() == 0 {
// If we only have job + instance, then target_info isn't useful, so don't add it.
return
}
// create parameters for addSample
name := targetMetricName
if len(settings.Namespace) > 0 {
name = settings.Namespace + "_" + name
}
labels := createAttributes(resource, attributes, settings.ExternalLabels, nameStr, name)
sample := &prompb.Sample{
Value: float64(1),
// convert ns to ms
Timestamp: convertTimeStamp(timestamp),
}
addSample(tsMap, sample, labels, infoType)
}
// convertTimeStamp converts OTLP timestamp in ns to timestamp in ms
func convertTimeStamp(timestamp pcommon.Timestamp) int64 {
return timestamp.AsTime().UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond))
}

View file

@ -0,0 +1,158 @@
// DO NOT EDIT. COPIED AS-IS. SEE README.md
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package prometheusremotewrite // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
import (
"fmt"
"math"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/prompb"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
)
const defaultZeroThreshold = 1e-128
func addSingleExponentialHistogramDataPoint(
metric string,
pt pmetric.ExponentialHistogramDataPoint,
resource pcommon.Resource,
settings Settings,
series map[string]*prompb.TimeSeries,
) error {
labels := createAttributes(
resource,
pt.Attributes(),
settings.ExternalLabels,
model.MetricNameLabel, metric,
)
sig := timeSeriesSignature(
pmetric.MetricTypeExponentialHistogram.String(),
&labels,
)
ts, ok := series[sig]
if !ok {
ts = &prompb.TimeSeries{
Labels: labels,
}
series[sig] = ts
}
histogram, err := exponentialToNativeHistogram(pt)
if err != nil {
return err
}
ts.Histograms = append(ts.Histograms, histogram)
exemplars := getPromExemplars[pmetric.ExponentialHistogramDataPoint](pt)
ts.Exemplars = append(ts.Exemplars, exemplars...)
return nil
}
// exponentialToNativeHistogram translates OTel Exponential Histogram data point
// to Prometheus Native Histogram.
func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prompb.Histogram, error) {
scale := p.Scale()
if scale < -4 || scale > 8 {
return prompb.Histogram{},
fmt.Errorf("cannot convert exponential to native histogram."+
" Scale must be <= 8 and >= -4, was %d", scale)
// TODO: downscale to 8 if scale > 8
}
pSpans, pDeltas := convertBucketsLayout(p.Positive())
nSpans, nDeltas := convertBucketsLayout(p.Negative())
h := prompb.Histogram{
Schema: scale,
ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: p.ZeroCount()},
// TODO use zero_threshold, if set, see
// https://github.com/open-telemetry/opentelemetry-proto/pull/441
ZeroThreshold: defaultZeroThreshold,
PositiveSpans: pSpans,
PositiveDeltas: pDeltas,
NegativeSpans: nSpans,
NegativeDeltas: nDeltas,
Timestamp: convertTimeStamp(p.Timestamp()),
}
if p.Flags().NoRecordedValue() {
h.Sum = math.Float64frombits(value.StaleNaN)
h.Count = &prompb.Histogram_CountInt{CountInt: value.StaleNaN}
} else {
if p.HasSum() {
h.Sum = p.Sum()
}
h.Count = &prompb.Histogram_CountInt{CountInt: p.Count()}
}
return h, nil
}
// convertBucketsLayout translates OTel Exponential Histogram dense buckets
// representation to Prometheus Native Histogram sparse bucket representation.
//
// The translation logic is taken from the client_golang `histogram.go#makeBuckets`
// function, see `makeBuckets` https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go
// The bucket indexes conversion was adjusted, since OTel exp. histogram bucket
// index 0 corresponds to the range (1, base] while Prometheus bucket index 0
// to the range (base^-1, 1].
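//
// Illustrative translation (derived from the logic below): dense buckets with
// offset 0 and counts [2, 2, 2, 2, 2] become a single span {Offset: 1, Length: 5}
// with deltas [2, 0, 0, 0, 0].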
func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets) ([]prompb.BucketSpan, []int64) {
bucketCounts := buckets.BucketCounts()
if bucketCounts.Len() == 0 {
return nil, nil
}
var (
spans []prompb.BucketSpan
deltas []int64
prevCount int64
nextBucketIdx int32
)
appendDelta := func(count int64) {
spans[len(spans)-1].Length++
deltas = append(deltas, count-prevCount)
prevCount = count
}
for i := 0; i < bucketCounts.Len(); i++ {
count := int64(bucketCounts.At(i))
if count == 0 {
continue
}
// The offset is adjusted by 1 as described above.
bucketIdx := int32(i) + buckets.Offset() + 1
delta := bucketIdx - nextBucketIdx
if i == 0 || delta > 2 {
// We have to create a new span, either because we are
// at the very beginning, or because we have found a gap
// of more than two buckets. The constant 2 is copied from the logic in
// https://github.com/prometheus/client_golang/blob/27f0506d6ebbb117b6b697d0552ee5be2502c5f2/prometheus/histogram.go#L1296
spans = append(spans, prompb.BucketSpan{
Offset: delta,
Length: 0,
})
} else {
// We have found a small gap (or no gap at all).
// Insert empty buckets as needed.
for j := int32(0); j < delta; j++ {
appendDelta(0)
}
}
appendDelta(count)
nextBucketIdx = bucketIdx + 1
}
return spans, deltas
}

View file

@ -0,0 +1,114 @@
// DO NOT EDIT. COPIED AS-IS. SEE README.md
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package prometheusremotewrite // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
import (
"errors"
"fmt"
"github.com/prometheus/prometheus/prompb"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.uber.org/multierr"
prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
)
type Settings struct {
Namespace string
ExternalLabels map[string]string
DisableTargetInfo bool
ExportCreatedMetric bool
}
// FromMetrics converts pmetric.Metrics to the Prometheus remote write format.
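// The returned map is keyed by the series signature produced by timeSeriesSignature.
// Translation failures for individual metrics are accumulated into errs and do not
// abort the conversion of the remaining metrics.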
func FromMetrics(md pmetric.Metrics, settings Settings) (tsMap map[string]*prompb.TimeSeries, errs error) {
tsMap = make(map[string]*prompb.TimeSeries)
resourceMetricsSlice := md.ResourceMetrics()
for i := 0; i < resourceMetricsSlice.Len(); i++ {
resourceMetrics := resourceMetricsSlice.At(i)
resource := resourceMetrics.Resource()
scopeMetricsSlice := resourceMetrics.ScopeMetrics()
// keep track of the most recent timestamp in the ResourceMetrics for
// use with the "target" info metric
var mostRecentTimestamp pcommon.Timestamp
for j := 0; j < scopeMetricsSlice.Len(); j++ {
scopeMetrics := scopeMetricsSlice.At(j)
metricSlice := scopeMetrics.Metrics()
// TODO: decide if instrumentation library information should be exported as labels
for k := 0; k < metricSlice.Len(); k++ {
metric := metricSlice.At(k)
mostRecentTimestamp = maxTimestamp(mostRecentTimestamp, mostRecentTimestampInMetric(metric))
if !isValidAggregationTemporality(metric) {
errs = multierr.Append(errs, fmt.Errorf("invalid temporality and type combination for metric %q", metric.Name()))
continue
}
// handle individual metric based on type
switch metric.Type() {
case pmetric.MetricTypeGauge:
dataPoints := metric.Gauge().DataPoints()
if dataPoints.Len() == 0 {
errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
}
for x := 0; x < dataPoints.Len(); x++ {
addSingleGaugeNumberDataPoint(dataPoints.At(x), resource, metric, settings, tsMap)
}
case pmetric.MetricTypeSum:
dataPoints := metric.Sum().DataPoints()
if dataPoints.Len() == 0 {
errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
}
for x := 0; x < dataPoints.Len(); x++ {
addSingleSumNumberDataPoint(dataPoints.At(x), resource, metric, settings, tsMap)
}
case pmetric.MetricTypeHistogram:
dataPoints := metric.Histogram().DataPoints()
if dataPoints.Len() == 0 {
errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
}
for x := 0; x < dataPoints.Len(); x++ {
addSingleHistogramDataPoint(dataPoints.At(x), resource, metric, settings, tsMap)
}
case pmetric.MetricTypeExponentialHistogram:
dataPoints := metric.ExponentialHistogram().DataPoints()
if dataPoints.Len() == 0 {
errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
}
name := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace)
for x := 0; x < dataPoints.Len(); x++ {
errs = multierr.Append(
errs,
addSingleExponentialHistogramDataPoint(
name,
dataPoints.At(x),
resource,
settings,
tsMap,
),
)
}
case pmetric.MetricTypeSummary:
dataPoints := metric.Summary().DataPoints()
if dataPoints.Len() == 0 {
errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
}
for x := 0; x < dataPoints.Len(); x++ {
addSingleSummaryDataPoint(dataPoints.At(x), resource, metric, settings, tsMap)
}
default:
errs = multierr.Append(errs, errors.New("unsupported metric type"))
}
}
}
addResourceTargetInfo(resource, settings, mostRecentTimestamp, tsMap)
}
return
}

View file

@ -0,0 +1,104 @@
// DO NOT EDIT. COPIED AS-IS. SEE README.md
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package prometheusremotewrite // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
import (
"math"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/prompb"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
)
// addSingleGaugeNumberDataPoint converts the Gauge metric data point to a
// Prometheus time series with samples and labels. The result is stored in the
// series map.
func addSingleGaugeNumberDataPoint(
pt pmetric.NumberDataPoint,
resource pcommon.Resource,
metric pmetric.Metric,
settings Settings,
series map[string]*prompb.TimeSeries,
) {
name := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace)
labels := createAttributes(
resource,
pt.Attributes(),
settings.ExternalLabels,
model.MetricNameLabel, name,
)
sample := &prompb.Sample{
// convert ns to ms
Timestamp: convertTimeStamp(pt.Timestamp()),
}
switch pt.ValueType() {
case pmetric.NumberDataPointValueTypeInt:
sample.Value = float64(pt.IntValue())
case pmetric.NumberDataPointValueTypeDouble:
sample.Value = pt.DoubleValue()
}
if pt.Flags().NoRecordedValue() {
sample.Value = math.Float64frombits(value.StaleNaN)
}
addSample(series, sample, labels, metric.Type().String())
}
// addSingleSumNumberDataPoint converts the Sum metric data point to a Prometheus
// time series with samples, labels and exemplars. The result is stored in the
// series map.
func addSingleSumNumberDataPoint(
pt pmetric.NumberDataPoint,
resource pcommon.Resource,
metric pmetric.Metric,
settings Settings,
series map[string]*prompb.TimeSeries,
) {
name := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace)
labels := createAttributes(
resource,
pt.Attributes(),
settings.ExternalLabels,
model.MetricNameLabel, name,
)
sample := &prompb.Sample{
// convert ns to ms
Timestamp: convertTimeStamp(pt.Timestamp()),
}
switch pt.ValueType() {
case pmetric.NumberDataPointValueTypeInt:
sample.Value = float64(pt.IntValue())
case pmetric.NumberDataPointValueTypeDouble:
sample.Value = pt.DoubleValue()
}
if pt.Flags().NoRecordedValue() {
sample.Value = math.Float64frombits(value.StaleNaN)
}
sig := addSample(series, sample, labels, metric.Type().String())
if ts, ok := series[sig]; sig != "" && ok {
exemplars := getPromExemplars[pmetric.NumberDataPoint](pt)
ts.Exemplars = append(ts.Exemplars, exemplars...)
}
// add _created time series if needed
if settings.ExportCreatedMetric && metric.Sum().IsMonotonic() {
startTimestamp := pt.StartTimestamp()
if startTimestamp != 0 {
createdLabels := createAttributes(
resource,
pt.Attributes(),
settings.ExternalLabels,
nameStr,
name+createdSuffix,
)
addCreatedTimeSeriesIfNeeded(series, createdLabels, startTimestamp, metric.Type().String())
}
}
}

View file

@ -0,0 +1,15 @@
#!/bin/bash
OTEL_VERSION=v0.81.0
git clone https://github.com/open-telemetry/opentelemetry-collector-contrib ./tmp
cd ./tmp
git checkout $OTEL_VERSION
cd ..
rm -rf ./prometheusremotewrite/*
cp -r ./tmp/pkg/translator/prometheusremotewrite/*.go ./prometheusremotewrite
rm -rf ./prometheusremotewrite/*_test.go
rm -rf ./tmp
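# Rewrite imports of the translator package to point at the in-repo copy and
# prepend the DO NOT EDIT header. Note that `sed -i ''` is the BSD/macOS form;
# GNU sed expects `sed -i` without the empty suffix argument.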
sed -i '' 's#github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus#github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus#g' ./prometheusremotewrite/*.go
sed -i '' '1s#^#// DO NOT EDIT. COPIED AS-IS. SEE README.md\n\n#g' ./prometheusremotewrite/*.go

View file

@ -27,6 +27,7 @@ import (
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage"
otlptranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
)
type writeHandler struct {
@ -119,8 +120,9 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
samplesWithInvalidLabels++
continue
}
var ref storage.SeriesRef
for _, s := range ts.Samples {
_, err = app.Append(0, labels, s.Timestamp, s.Value)
ref, err = app.Append(ref, labels, s.Timestamp, s.Value)
if err != nil {
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
@ -177,3 +179,60 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
return nil
}
// NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and
// writes them to the provided appendable.
func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable) http.Handler {
rwHandler := &writeHandler{
logger: logger,
appendable: appendable,
}
return &otlpWriteHandler{
logger: logger,
rwHandler: rwHandler,
}
}
type otlpWriteHandler struct {
logger log.Logger
rwHandler *writeHandler
}
func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
req, err := DecodeOTLPWriteRequest(r)
if err != nil {
level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
prwMetricsMap, errs := otlptranslator.FromMetrics(req.Metrics(), otlptranslator.Settings{})
if errs != nil {
level.Warn(h.logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", errs)
}
prwMetrics := make([]prompb.TimeSeries, 0, len(prwMetricsMap))
for _, ts := range prwMetricsMap {
prwMetrics = append(prwMetrics, *ts)
}
err = h.rwHandler.write(r.Context(), &prompb.WriteRequest{
Timeseries: prwMetrics,
})
switch err {
case nil:
case storage.ErrOutOfOrderSample, storage.ErrOutOfBounds, storage.ErrDuplicateSampleForTimestamp:
// Indicate an out-of-order sample as a bad request to prevent retries.
http.Error(w, err.Error(), http.StatusBadRequest)
return
default:
level.Error(h.logger).Log("msg", "Error appending remote write", "err", err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
}
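// Illustrative wiring (not part of this diff; mux and endpoint path are assumptions):
// with the otlp-write-receiver feature flag enabled, the web layer is expected to
// mount this handler on the OTLP metrics endpoint, along the lines of:
//
//	mux.Handle("/api/v1/otlp/v1/metrics", remote.NewOTLPWriteHandler(logger, appendable))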

View file

@ -14,6 +14,9 @@
package remote
import (
"bytes"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"
@ -22,6 +25,9 @@ import (
common_config "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/labels"
@ -373,3 +379,107 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
err = s.Close()
require.NoError(t, err)
}
func TestOTLPWriteHandler(t *testing.T) {
exportRequest := generateOTLPWriteRequest(t)
buf, err := exportRequest.MarshalProto()
require.NoError(t, err)
req, err := http.NewRequest("", "", bytes.NewReader(buf))
require.NoError(t, err)
req.Header.Set("Content-Type", "application/x-protobuf")
appendable := &mockAppendable{}
handler := NewOTLPWriteHandler(nil, appendable)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
resp := recorder.Result()
require.Equal(t, http.StatusOK, resp.StatusCode)
require.Equal(t, 12, len(appendable.samples)) // 1 (counter) + 1 (gauge) + 1 (target_info) + 7 (hist_bucket) + 2 (hist_sum, hist_count)
require.Equal(t, 1, len(appendable.histograms)) // 1 (exponential histogram)
require.Equal(t, 1, len(appendable.exemplars)) // 1 (exemplar)
}
func generateOTLPWriteRequest(t *testing.T) pmetricotlp.ExportRequest {
d := pmetric.NewMetrics()
// Generate One Counter, One Gauge, One Histogram, One Exponential-Histogram
// with resource attributes: service.name="test-service", service.instance.id="test-instance", host.name="test-host"
// with metric attribute: foo.bar="baz"
timestamp := time.Now()
resourceMetric := d.ResourceMetrics().AppendEmpty()
resourceMetric.Resource().Attributes().PutStr("service.name", "test-service")
resourceMetric.Resource().Attributes().PutStr("service.instance.id", "test-instance")
resourceMetric.Resource().Attributes().PutStr("host.name", "test-host")
scopeMetric := resourceMetric.ScopeMetrics().AppendEmpty()
// Generate One Counter
counterMetric := scopeMetric.Metrics().AppendEmpty()
counterMetric.SetName("test-counter")
counterMetric.SetDescription("test-counter-description")
counterMetric.SetEmptySum()
counterMetric.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
counterMetric.Sum().SetIsMonotonic(true)
counterDataPoint := counterMetric.Sum().DataPoints().AppendEmpty()
counterDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
counterDataPoint.SetDoubleValue(10.0)
counterDataPoint.Attributes().PutStr("foo.bar", "baz")
counterExemplar := counterDataPoint.Exemplars().AppendEmpty()
counterExemplar.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
counterExemplar.SetDoubleValue(10.0)
counterExemplar.SetSpanID(pcommon.SpanID{0, 1, 2, 3, 4, 5, 6, 7})
counterExemplar.SetTraceID(pcommon.TraceID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15})
// Generate One Gauge
gaugeMetric := scopeMetric.Metrics().AppendEmpty()
gaugeMetric.SetName("test-gauge")
gaugeMetric.SetDescription("test-gauge-description")
gaugeMetric.SetEmptyGauge()
gaugeDataPoint := gaugeMetric.Gauge().DataPoints().AppendEmpty()
gaugeDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
gaugeDataPoint.SetDoubleValue(10.0)
gaugeDataPoint.Attributes().PutStr("foo.bar", "baz")
// Generate One Histogram
histogramMetric := scopeMetric.Metrics().AppendEmpty()
histogramMetric.SetName("test-histogram")
histogramMetric.SetDescription("test-histogram-description")
histogramMetric.SetEmptyHistogram()
histogramMetric.Histogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
histogramDataPoint := histogramMetric.Histogram().DataPoints().AppendEmpty()
histogramDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
histogramDataPoint.ExplicitBounds().FromRaw([]float64{0.0, 1.0, 2.0, 3.0, 4.0, 5.0})
histogramDataPoint.BucketCounts().FromRaw([]uint64{2, 2, 2, 2, 2, 2})
histogramDataPoint.SetCount(10)
histogramDataPoint.SetSum(30.0)
histogramDataPoint.Attributes().PutStr("foo.bar", "baz")
// Generate One Exponential-Histogram
exponentialHistogramMetric := scopeMetric.Metrics().AppendEmpty()
exponentialHistogramMetric.SetName("test-exponential-histogram")
exponentialHistogramMetric.SetDescription("test-exponential-histogram-description")
exponentialHistogramMetric.SetEmptyExponentialHistogram()
exponentialHistogramMetric.ExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
exponentialHistogramDataPoint := exponentialHistogramMetric.ExponentialHistogram().DataPoints().AppendEmpty()
exponentialHistogramDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
exponentialHistogramDataPoint.SetScale(2.0)
exponentialHistogramDataPoint.Positive().BucketCounts().FromRaw([]uint64{2, 2, 2, 2, 2})
exponentialHistogramDataPoint.SetZeroCount(2)
exponentialHistogramDataPoint.SetCount(10)
exponentialHistogramDataPoint.SetSum(30.0)
exponentialHistogramDataPoint.Attributes().PutStr("foo.bar", "baz")
return pmetricotlp.NewExportRequestFromMetrics(d)
}
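For orientation, a hedged client-side sketch (not part of this diff) of how such an export request could be pushed to the new receiver; the helper name, the endpoint path /api/v1/otlp/v1/metrics, and the protobuf content type are assumptions based on the otlp-write-receiver feature flag:
// pushOTLPWriteRequest is a hypothetical helper: it serializes the OTLP
// request to protobuf wire format and POSTs it to the (assumed) OTLP
// receiver endpoint of a running Prometheus server.
func pushOTLPWriteRequest(t *testing.T, serverURL string, req pmetricotlp.ExportRequest) {
	body, err := req.MarshalProto()
	require.NoError(t, err)
	httpReq, err := http.NewRequest(http.MethodPost, serverURL+"/api/v1/otlp/v1/metrics", bytes.NewReader(body))
	require.NoError(t, err)
	httpReq.Header.Set("Content-Type", "application/x-protobuf")
	resp, err := http.DefaultClient.Do(httpReq)
	require.NoError(t, err)
	defer resp.Body.Close()
	require.Equal(t, http.StatusOK, resp.StatusCode)
}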

View file

@ -60,7 +60,21 @@ func NewListSeries(lset labels.Labels, s []tsdbutil.Sample) *SeriesEntry {
// NewListChunkSeriesFromSamples returns a chunk series entry that allows iterating over the provided samples.
// NOTE: It uses an inefficient chunk encoding implementation, not caring about chunk size.
// Use only for testing.
func NewListChunkSeriesFromSamples(lset labels.Labels, samples ...[]tsdbutil.Sample) *ChunkSeriesEntry {
chksFromSamples := make([]chunks.Meta, 0, len(samples))
for _, s := range samples {
cfs, err := tsdbutil.ChunkFromSamples(s)
if err != nil {
return &ChunkSeriesEntry{
Lset: lset,
ChunkIteratorFn: func(it chunks.Iterator) chunks.Iterator {
return errChunksIterator{err: err}
},
}
}
chksFromSamples = append(chksFromSamples, cfs)
}
return &ChunkSeriesEntry{
Lset: lset,
ChunkIteratorFn: func(it chunks.Iterator) chunks.Iterator {
@ -71,9 +85,7 @@ func NewListChunkSeriesFromSamples(lset labels.Labels, samples ...[]tsdbutil.Sam
} else {
chks = make([]chunks.Meta, 0, len(samples))
}
for _, s := range samples {
chks = append(chks, tsdbutil.ChunkFromSamples(s))
}
chks = append(chks, chksFromSamples...)
if existing {
lcsi.Reset(chks...)
return lcsi
@ -283,9 +295,10 @@ func NewSeriesToChunkEncoder(series Series) ChunkSeries {
func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator {
var (
chk chunkenc.Chunk
app *RecodingAppender
err error
chk, newChk chunkenc.Chunk
app chunkenc.Appender
err error
recoded bool
)
mint := int64(math.MaxInt64)
maxt := int64(math.MinInt64)
@ -300,20 +313,17 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator {
seriesIter := s.Series.Iterator(nil)
lastType := chunkenc.ValNone
for typ := seriesIter.Next(); typ != chunkenc.ValNone; typ = seriesIter.Next() {
chunkCreated := false
if typ != lastType || i >= seriesToChunkEncoderSplit {
// Create a new chunk if the sample type changed or too many samples in the current one.
chks = appendChunk(chks, mint, maxt, chk)
chunkCreated = true
chk, err = chunkenc.NewEmptyChunk(typ.ChunkEncoding())
if err != nil {
return errChunksIterator{err: err}
}
chkAppender, err := chk.Appender()
app, err = chk.Appender()
if err != nil {
return errChunksIterator{err: err}
}
app = NewRecodingAppender(&chk, chkAppender)
mint = int64(math.MaxInt64)
// maxt is immediately overwritten below which is why setting it here won't make a difference.
i = 0
@ -332,52 +342,33 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator {
app.Append(t, v)
case chunkenc.ValHistogram:
t, h = seriesIter.AtHistogram()
if ok, counterReset := app.AppendHistogram(t, h); !ok {
chks = appendChunk(chks, mint, maxt, chk)
histChunk := chunkenc.NewHistogramChunk()
chunkCreated = true
if counterReset {
histChunk.SetCounterResetHeader(chunkenc.CounterReset)
}
chk = histChunk
chkAppender, err := chk.Appender()
if err != nil {
return errChunksIterator{err: err}
}
mint = int64(math.MaxInt64)
i = 0
app = NewRecodingAppender(&chk, chkAppender)
if ok, _ := app.AppendHistogram(t, h); !ok {
panic("unexpected error while appending histogram")
}
newChk, recoded, app, err = app.AppendHistogram(nil, t, h, false)
if err != nil {
return errChunksIterator{err: err}
}
if chunkCreated && h.CounterResetHint == histogram.GaugeType {
chk.(*chunkenc.HistogramChunk).SetCounterResetHeader(chunkenc.GaugeType)
if newChk != nil {
if !recoded {
chks = appendChunk(chks, mint, maxt, chk)
mint = int64(math.MaxInt64)
// maxt is immediately overwritten below which is why setting it here won't make a difference.
i = 0
}
chk = newChk
}
case chunkenc.ValFloatHistogram:
t, fh = seriesIter.AtFloatHistogram()
if ok, counterReset := app.AppendFloatHistogram(t, fh); !ok {
chks = appendChunk(chks, mint, maxt, chk)
floatHistChunk := chunkenc.NewFloatHistogramChunk()
chunkCreated = true
if counterReset {
floatHistChunk.SetCounterResetHeader(chunkenc.CounterReset)
}
chk = floatHistChunk
chkAppender, err := chk.Appender()
if err != nil {
return errChunksIterator{err: err}
}
mint = int64(math.MaxInt64)
i = 0
app = NewRecodingAppender(&chk, chkAppender)
if ok, _ := app.AppendFloatHistogram(t, fh); !ok {
panic("unexpected error while float appending histogram")
}
newChk, recoded, app, err = app.AppendFloatHistogram(nil, t, fh, false)
if err != nil {
return errChunksIterator{err: err}
}
if chunkCreated && fh.CounterResetHint == histogram.GaugeType {
chk.(*chunkenc.FloatHistogramChunk).SetCounterResetHeader(chunkenc.GaugeType)
if newChk != nil {
if !recoded {
chks = appendChunk(chks, mint, maxt, chk)
mint = int64(math.MaxInt64)
// maxt is immediately overwritten below which is why setting it here won't make a difference.
i = 0
}
chk = newChk
}
default:
return errChunksIterator{err: fmt.Errorf("unknown sample type %s", typ.String())}
@ -482,126 +473,3 @@ func ExpandChunks(iter chunks.Iterator) ([]chunks.Meta, error) {
}
return result, iter.Err()
}
// RecodingAppender is a tsdb.Appender that recodes histogram samples if needed during appends.
// It takes an existing appender and a chunk to which samples are appended.
type RecodingAppender struct {
chk *chunkenc.Chunk
app chunkenc.Appender
}
func NewRecodingAppender(chk *chunkenc.Chunk, app chunkenc.Appender) *RecodingAppender {
return &RecodingAppender{
chk: chk,
app: app,
}
}
// Append appends a float sample to the appender.
func (a *RecodingAppender) Append(t int64, v float64) {
a.app.Append(t, v)
}
// AppendHistogram appends a histogram sample to the underlying chunk.
// The method returns okToAppend=false if the sample cannot be appended, and
// counterReset=true when it is not appendable because of a counter reset.
// If counterReset is true, okToAppend is always false.
func (a *RecodingAppender) AppendHistogram(t int64, h *histogram.Histogram) (okToAppend, counterReset bool) {
app, ok := a.app.(*chunkenc.HistogramAppender)
if !ok {
return false, false
}
if app.NumSamples() == 0 {
a.app.AppendHistogram(t, h)
return true, false
}
var (
pForwardInserts, nForwardInserts []chunkenc.Insert
pBackwardInserts, nBackwardInserts []chunkenc.Insert
pMergedSpans, nMergedSpans []histogram.Span
)
switch h.CounterResetHint {
case histogram.GaugeType:
pForwardInserts, nForwardInserts,
pBackwardInserts, nBackwardInserts,
pMergedSpans, nMergedSpans,
okToAppend = app.AppendableGauge(h)
default:
pForwardInserts, nForwardInserts, okToAppend, counterReset = app.Appendable(h)
}
if !okToAppend || counterReset {
return false, counterReset
}
if len(pBackwardInserts)+len(nBackwardInserts) > 0 {
h.PositiveSpans = pMergedSpans
h.NegativeSpans = nMergedSpans
app.RecodeHistogram(h, pBackwardInserts, nBackwardInserts)
}
if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
chk, app := app.Recode(
pForwardInserts, nForwardInserts,
h.PositiveSpans, h.NegativeSpans,
)
*a.chk = chk
a.app = app
}
a.app.AppendHistogram(t, h)
return true, counterReset
}
// AppendFloatHistogram appends a float histogram sample to the underlying chunk.
// The method returns okToAppend=false if the sample cannot be appended, and
// counterReset=true when it is not appendable because of a counter reset.
// If counterReset is true, okToAppend is always false.
func (a *RecodingAppender) AppendFloatHistogram(t int64, fh *histogram.FloatHistogram) (okToAppend, counterReset bool) {
app, ok := a.app.(*chunkenc.FloatHistogramAppender)
if !ok {
return false, false
}
if app.NumSamples() == 0 {
a.app.AppendFloatHistogram(t, fh)
return true, false
}
var (
pForwardInserts, nForwardInserts []chunkenc.Insert
pBackwardInserts, nBackwardInserts []chunkenc.Insert
pMergedSpans, nMergedSpans []histogram.Span
)
switch fh.CounterResetHint {
case histogram.GaugeType:
pForwardInserts, nForwardInserts,
pBackwardInserts, nBackwardInserts,
pMergedSpans, nMergedSpans,
okToAppend = app.AppendableGauge(fh)
default:
pForwardInserts, nForwardInserts, okToAppend, counterReset = app.Appendable(fh)
}
if !okToAppend || counterReset {
return false, counterReset
}
if len(pBackwardInserts)+len(nBackwardInserts) > 0 {
fh.PositiveSpans = pMergedSpans
fh.NegativeSpans = nMergedSpans
app.RecodeHistogramm(fh, pBackwardInserts, nBackwardInserts)
}
if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
chunk, app := app.Recode(
pForwardInserts, nForwardInserts,
fh.PositiveSpans, fh.NegativeSpans,
)
*a.chk = chunk
a.app = app
}
a.app.AppendFloatHistogram(t, fh)
return true, counterReset
}

View file

@ -985,11 +985,25 @@ func (a *appender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Met
// Commit submits the collected samples and purges the batch.
func (a *appender) Commit() error {
if err := a.log(); err != nil {
return err
}
a.clearData()
a.appenderPool.Put(a)
return nil
}
// log logs all pending data to the WAL.
func (a *appender) log() error {
a.mtx.RLock()
defer a.mtx.RUnlock()
var encoder record.Encoder
buf := a.bufPool.Get().([]byte)
defer func() {
a.bufPool.Put(buf) //nolint:staticcheck
}()
if len(a.pendingSeries) > 0 {
buf = encoder.Series(a.pendingSeries, buf)
@ -1051,12 +1065,11 @@ func (a *appender) Commit() error {
}
}
//nolint:staticcheck
a.bufPool.Put(buf)
return a.Rollback()
return nil
}
func (a *appender) Rollback() error {
// clearData clears all pending data.
func (a *appender) clearData() {
a.pendingSeries = a.pendingSeries[:0]
a.pendingSamples = a.pendingSamples[:0]
a.pendingHistograms = a.pendingHistograms[:0]
@ -1065,6 +1078,39 @@ func (a *appender) Rollback() error {
a.sampleSeries = a.sampleSeries[:0]
a.histogramSeries = a.histogramSeries[:0]
a.floatHistogramSeries = a.floatHistogramSeries[:0]
}
func (a *appender) Rollback() error {
// Series are created in memory regardless of rollback. This means we must
// log them to the WAL; otherwise, subsequent commits may reference a series
// which was never written to the WAL.
if err := a.logSeries(); err != nil {
return err
}
a.clearData()
a.appenderPool.Put(a)
return nil
}
// logSeries logs only pending series records to the WAL.
func (a *appender) logSeries() error {
a.mtx.RLock()
defer a.mtx.RUnlock()
if len(a.pendingSeries) > 0 {
buf := a.bufPool.Get().([]byte)
defer func() {
a.bufPool.Put(buf) //nolint:staticcheck
}()
var encoder record.Encoder
buf = encoder.Series(a.pendingSeries, buf)
if err := a.wal.Log(buf); err != nil {
return err
}
buf = buf[:0]
}
return nil
}
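To make the Rollback contract above concrete, a hedged sketch (illustrative names, not code from this diff) of the failure mode that logging series on Rollback prevents:
// rollbackKeepsSeries sketches why Rollback must still log series records: the
// first appender creates the series in memory, and a later Commit may emit a
// sample that references it by ref alone.
func rollbackKeepsSeries(ctx context.Context, db storage.Appendable, lbls labels.Labels, ts int64) error {
	app := db.Appender(ctx)
	ref, err := app.Append(0, lbls, ts, 1.0) // series created in memory
	if err != nil {
		return err
	}
	if err := app.Rollback(); err != nil { // drops the sample, still logs the series
		return err
	}
	app = db.Appender(ctx)
	if _, err := app.Append(ref, labels.EmptyLabels(), ts+1, 2.0); err != nil { // reuses the cached ref
		return err
	}
	return app.Commit() // the WAL already holds the series record this sample refers to
}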

View file

@ -333,8 +333,8 @@ func TestRollback(t *testing.T) {
}
}
// Check that the rollback ensured nothing got stored.
require.Equal(t, 0, walSeriesCount, "series should not have been written to WAL")
// Check that only series get stored after calling Rollback.
require.Equal(t, numSeries*3, walSeriesCount, "series should have been written to WAL")
require.Equal(t, 0, walSamplesCount, "samples should not have been written to WAL")
require.Equal(t, 0, walExemplarsCount, "exemplars should not have been written to WAL")
require.Equal(t, 0, walHistogramCount, "histograms should not have been written to WAL")

View file

@ -82,8 +82,20 @@ type Chunk interface {
// Appender adds sample pairs to a chunk.
type Appender interface {
Append(int64, float64)
AppendHistogram(t int64, h *histogram.Histogram)
AppendFloatHistogram(t int64, h *histogram.FloatHistogram)
// AppendHistogram and AppendFloatHistogram append a histogram sample to a histogram or float histogram chunk.
// Appending a histogram may require creating a completely new chunk or recoding (changing) the current chunk.
// The Appender prev is used to determine if there is a counter reset between the previous Appender and the current Appender.
// The Appender prev is optional and only taken into account when the first sample is being appended.
// The bool appendOnly governs what happens when a sample cannot be appended to the current chunk. If appendOnly is true,
// an error is returned without modifying the chunk. If appendOnly is false, a new chunk is created or the
// current chunk is recoded to accommodate the sample.
// The returned Chunk c is nil if the sample could be appended to the current Chunk, otherwise c is the new Chunk.
// The returned bool isRecoded can be used to distinguish between the new Chunk c being a completely new Chunk
// or the current Chunk recoded to a new Chunk.
// The Appender app that can be used for the next append is always returned.
AppendHistogram(prev *HistogramAppender, t int64, h *histogram.Histogram, appendOnly bool) (c Chunk, isRecoded bool, app Appender, err error)
AppendFloatHistogram(prev *FloatHistogramAppender, t int64, h *histogram.FloatHistogram, appendOnly bool) (c Chunk, isRecoded bool, app Appender, err error)
}
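As a rough illustration of this contract (a sketch, not code from this diff; the helper name is invented), a caller is expected to adopt whatever chunk and appender come back:
// appendOrCut shows typical handling of the return values: a nil Chunk means
// the sample landed in the current chunk; a non-nil Chunk with isRecoded=false
// is a freshly cut chunk, while isRecoded=true means the old chunk was rewritten.
func appendOrCut(app Appender, cur Chunk, t int64, h *histogram.Histogram) (Appender, Chunk, error) {
	newChk, recoded, newApp, err := app.AppendHistogram(nil, t, h, false)
	switch {
	case err != nil:
		return app, cur, err
	case newChk == nil:
		return newApp, cur, nil // appended to the current chunk
	case recoded:
		return newApp, newChk, nil // current chunk was rewritten with a new layout
	default:
		return newApp, newChk, nil // a brand new chunk was cut; finalize cur upstream
	}
}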
// Iterator is a simple iterator that can only get the next value.

View file

@ -15,6 +15,7 @@ package chunkenc
import (
"encoding/binary"
"fmt"
"math"
"github.com/prometheus/prometheus/model/histogram"
@ -80,15 +81,10 @@ func (c *FloatHistogramChunk) Layout() (
return readHistogramChunkLayout(&b)
}
// SetCounterResetHeader sets the counter reset header.
func (c *FloatHistogramChunk) SetCounterResetHeader(h CounterResetHeader) {
setCounterResetHeader(h, c.Bytes())
}
// GetCounterResetHeader returns the counter reset header encoded in the first
// 2 bits of the chunk header.
func (c *FloatHistogramChunk) GetCounterResetHeader() CounterResetHeader {
return CounterResetHeader(c.Bytes()[2] & 0b11000000)
return CounterResetHeader(c.Bytes()[2] & CounterResetHeaderMask)
}
// Compact implements the Chunk interface.
@ -174,7 +170,7 @@ func newFloatHistogramIterator(b []byte) *floatHistogramIterator {
// The first 3 bytes contain chunk headers.
// We skip that for actual samples.
_, _ = it.br.readBits(24)
it.counterResetHeader = CounterResetHeader(b[2] & 0b11000000)
it.counterResetHeader = CounterResetHeader(b[2] & CounterResetHeaderMask)
return it
}
@ -198,7 +194,11 @@ type FloatHistogramAppender struct {
}
func (a *FloatHistogramAppender) GetCounterResetHeader() CounterResetHeader {
return CounterResetHeader(a.b.bytes()[2] & 0b11000000)
return CounterResetHeader(a.b.bytes()[2] & CounterResetHeaderMask)
}
func (a *FloatHistogramAppender) setCounterResetHeader(cr CounterResetHeader) {
a.b.bytes()[2] = (a.b.bytes()[2] & (^CounterResetHeaderMask)) | (byte(cr) & CounterResetHeaderMask)
}
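// Informally, the masking above replaces only the top two bits of the third
// chunk byte (illustrative values, not from this diff): with
// CounterResetHeaderMask = 0b11000000 and cr = 0b10000000, a byte 0b00101010
// becomes (0b00101010 & 0b00111111) | 0b10000000 = 0b10101010, leaving the
// lower six bits untouched.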
func (a *FloatHistogramAppender) NumSamples() int {
@ -211,13 +211,7 @@ func (a *FloatHistogramAppender) Append(int64, float64) {
panic("appended a float sample to a histogram chunk")
}
// AppendHistogram implements Appender. This implementation panics because integer
// histogram samples must never be appended to a float histogram chunk.
func (a *FloatHistogramAppender) AppendHistogram(int64, *histogram.Histogram) {
panic("appended an integer histogram to a float histogram chunk")
}
// Appendable returns whether the chunk can be appended to, and if so whether
// appendable returns whether the chunk can be appended to, and if so whether
// any recoding needs to happen using the provided inserts (in case of any new
// buckets, positive or negative range, respectively). If the sample is a gauge
// histogram, appendableGauge must be used instead.
@ -232,7 +226,7 @@ func (a *FloatHistogramAppender) AppendHistogram(int64, *histogram.Histogram) {
// The method returns an additional boolean set to true if it is not appendable
// because of a counter reset. If the given sample is stale, it is always ok to
// append. If counterReset is true, okToAppend is always false.
func (a *FloatHistogramAppender) Appendable(h *histogram.FloatHistogram) (
func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) (
positiveInserts, negativeInserts []Insert,
okToAppend, counterReset bool,
) {
@ -288,7 +282,7 @@ func (a *FloatHistogramAppender) Appendable(h *histogram.FloatHistogram) (
return
}
// AppendableGauge returns whether the chunk can be appended to, and if so
// appendableGauge returns whether the chunk can be appended to, and if so
// whether:
// 1. Any recoding needs to happen to the chunk using the provided inserts
// (in case of any new buckets, positive or negative range, respectively).
@ -302,7 +296,7 @@ func (a *FloatHistogramAppender) Appendable(h *histogram.FloatHistogram) (
// - The schema has changed.
// - The threshold for the zero bucket has changed.
// - The last sample in the chunk was stale while the current sample is not stale.
func (a *FloatHistogramAppender) AppendableGauge(h *histogram.FloatHistogram) (
func (a *FloatHistogramAppender) appendableGauge(h *histogram.FloatHistogram) (
positiveInserts, negativeInserts []Insert,
backwardPositiveInserts, backwardNegativeInserts []Insert,
positiveSpans, negativeSpans []histogram.Span,
@ -399,11 +393,11 @@ func counterResetInAnyFloatBucket(oldBuckets []xorValue, newBuckets []float64, o
return false
}
// AppendFloatHistogram appends a float histogram to the chunk. The caller must ensure that
// appendFloatHistogram appends a float histogram to the chunk. The caller must ensure that
// the histogram is properly structured, e.g. the number of buckets used
// corresponds to the number conveyed by the span structures. First call
// appendable() and act accordingly!
func (a *FloatHistogramAppender) AppendFloatHistogram(t int64, h *histogram.FloatHistogram) {
func (a *FloatHistogramAppender) appendFloatHistogram(t int64, h *histogram.FloatHistogram) {
var tDelta int64
num := binary.BigEndian.Uint16(a.b.bytes())
@ -501,12 +495,12 @@ func (a *FloatHistogramAppender) writeXorValue(old *xorValue, v float64) {
old.value = v
}
// Recode converts the current chunk to accommodate an expansion of the set of
// recode converts the current chunk to accommodate an expansion of the set of
// (positive and/or negative) buckets used, according to the provided inserts,
// resulting in the honoring of the provided new positive and negative spans. To
// continue appending, use the returned Appender rather than the receiver of
// this method.
func (a *FloatHistogramAppender) Recode(
func (a *FloatHistogramAppender) recode(
positiveInserts, negativeInserts []Insert,
positiveSpans, negativeSpans []histogram.Span,
) (Chunk, Appender) {
@ -519,8 +513,9 @@ func (a *FloatHistogramAppender) Recode(
hc := NewFloatHistogramChunk()
app, err := hc.Appender()
if err != nil {
panic(err)
panic(err) // This should never happen for an empty float histogram chunk.
}
happ := app.(*FloatHistogramAppender)
numPositiveBuckets, numNegativeBuckets := countSpans(positiveSpans), countSpans(negativeSpans)
for it.Next() == ValFloatHistogram {
@ -546,16 +541,16 @@ func (a *FloatHistogramAppender) Recode(
if len(negativeInserts) > 0 {
hOld.NegativeBuckets = insert(hOld.NegativeBuckets, negativeBuckets, negativeInserts, false)
}
app.AppendFloatHistogram(tOld, hOld)
happ.appendFloatHistogram(tOld, hOld)
}
hc.SetCounterResetHeader(CounterResetHeader(byts[2] & 0b11000000))
happ.setCounterResetHeader(CounterResetHeader(byts[2] & CounterResetHeaderMask))
return hc, app
}
// RecodeHistogramm converts the current histogram (in-place) to accommodate an expansion of the set of
// recodeHistogram converts the current histogram (in-place) to accommodate an expansion of the set of
// (positive and/or negative) buckets used.
func (a *FloatHistogramAppender) RecodeHistogramm(
func (a *FloatHistogramAppender) recodeHistogram(
fh *histogram.FloatHistogram,
pBackwardInter, nBackwardInter []Insert,
) {
@ -569,6 +564,111 @@ func (a *FloatHistogramAppender) RecodeHistogramm(
}
}
func (a *FloatHistogramAppender) AppendHistogram(*HistogramAppender, int64, *histogram.Histogram, bool) (Chunk, bool, Appender, error) {
panic("appended a histogram sample to a float histogram chunk")
}
func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppender, t int64, h *histogram.FloatHistogram, appendOnly bool) (Chunk, bool, Appender, error) {
if a.NumSamples() == 0 {
a.appendFloatHistogram(t, h)
if h.CounterResetHint == histogram.GaugeType {
a.setCounterResetHeader(GaugeType)
return nil, false, a, nil
}
if prev != nil && h.CounterResetHint != histogram.CounterReset {
// This is a new chunk, but continued from a previous one. We need to calculate the reset header unless already set.
_, _, _, counterReset := prev.appendable(h)
if counterReset {
a.setCounterResetHeader(CounterReset)
} else {
a.setCounterResetHeader(NotCounterReset)
}
} else {
// Honor the explicit counter reset hint.
a.setCounterResetHeader(CounterResetHeader(h.CounterResetHint))
}
return nil, false, a, nil
}
// Adding counter-like histogram.
if h.CounterResetHint != histogram.GaugeType {
pForwardInserts, nForwardInserts, okToAppend, counterReset := a.appendable(h)
if !okToAppend || counterReset {
if appendOnly {
if !okToAppend {
return nil, false, a, fmt.Errorf("float histogram schema change")
}
return nil, false, a, fmt.Errorf("float histogram counter reset")
}
newChunk := NewFloatHistogramChunk()
app, err := newChunk.Appender()
if err != nil {
panic(err) // This should never happen for an empty float histogram chunk.
}
happ := app.(*FloatHistogramAppender)
if counterReset {
happ.setCounterResetHeader(CounterReset)
}
happ.appendFloatHistogram(t, h)
return newChunk, false, app, nil
}
if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
if appendOnly {
return nil, false, a, fmt.Errorf("float histogram layout change with %d positive and %d negative forwards inserts", len(pForwardInserts), len(nForwardInserts))
}
chk, app := a.recode(
pForwardInserts, nForwardInserts,
h.PositiveSpans, h.NegativeSpans,
)
app.(*FloatHistogramAppender).appendFloatHistogram(t, h)
return chk, true, app, nil
}
a.appendFloatHistogram(t, h)
return nil, false, a, nil
}
// Adding gauge histogram.
pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, pMergedSpans, nMergedSpans, okToAppend := a.appendableGauge(h)
if !okToAppend {
if appendOnly {
return nil, false, a, fmt.Errorf("float gauge histogram schema change")
}
newChunk := NewFloatHistogramChunk()
app, err := newChunk.Appender()
if err != nil {
panic(err) // This should never happen for an empty float histogram chunk.
}
happ := app.(*FloatHistogramAppender)
happ.setCounterResetHeader(GaugeType)
happ.appendFloatHistogram(t, h)
return newChunk, false, app, nil
}
if len(pBackwardInserts)+len(nBackwardInserts) > 0 {
if appendOnly {
return nil, false, a, fmt.Errorf("float gauge histogram layout change with %d positive and %d negative backwards inserts", len(pBackwardInserts), len(nBackwardInserts))
}
h.PositiveSpans = pMergedSpans
h.NegativeSpans = nMergedSpans
a.recodeHistogram(h, pBackwardInserts, nBackwardInserts)
}
if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
if appendOnly {
return nil, false, a, fmt.Errorf("float gauge histogram layout change with %d positive and %d negative forwards inserts", len(pForwardInserts), len(nForwardInserts))
}
chk, app := a.recode(
pForwardInserts, nForwardInserts,
h.PositiveSpans, h.NegativeSpans,
)
app.(*FloatHistogramAppender).appendFloatHistogram(t, h)
return chk, true, app, nil
}
a.appendFloatHistogram(t, h)
return nil, false, a, nil
}
type floatHistogramIterator struct {
br bstreamReader
numTotal uint16
@ -657,7 +757,7 @@ func (it *floatHistogramIterator) Reset(b []byte) {
it.numTotal = binary.BigEndian.Uint16(b)
it.numRead = 0
it.counterResetHeader = CounterResetHeader(b[2] & 0b11000000)
it.counterResetHeader = CounterResetHeader(b[2] & CounterResetHeaderMask)
it.t, it.tDelta = 0, 0
it.cnt, it.zCnt, it.sum = xorValue{}, xorValue{}, xorValue{}

View file

@ -53,7 +53,9 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) {
},
NegativeBuckets: []int64{2, 1, -1, -1}, // counts: 2, 3, 2, 1 (total 8)
}
app.AppendFloatHistogram(ts, h.ToFloat())
chk, _, app, err := app.AppendFloatHistogram(nil, ts, h.ToFloat(), false)
require.NoError(t, err)
require.Nil(t, chk)
exp = append(exp, floatResult{t: ts, h: h.ToFloat()})
require.Equal(t, 1, c.NumSamples())
@ -65,7 +67,9 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) {
h.Sum = 24.4
h.PositiveBuckets = []int64{5, -2, 1, -2} // counts: 5, 3, 4, 2 (total 14)
h.NegativeBuckets = []int64{4, -1, 1, -1} // counts: 4, 3, 4, 4 (total 15)
app.AppendFloatHistogram(ts, h.ToFloat())
chk, _, _, err = app.AppendFloatHistogram(nil, ts, h.ToFloat(), false)
require.NoError(t, err)
require.Nil(t, chk)
expH := h.ToFloat()
expH.CounterResetHint = histogram.NotCounterReset
exp = append(exp, floatResult{t: ts, h: expH})
@ -82,7 +86,9 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) {
h.Sum = 24.4
h.PositiveBuckets = []int64{6, 1, -3, 6} // counts: 6, 7, 4, 10 (total 27)
h.NegativeBuckets = []int64{5, 1, -2, 3} // counts: 5, 6, 4, 7 (total 22)
app.AppendFloatHistogram(ts, h.ToFloat())
chk, _, _, err = app.AppendFloatHistogram(nil, ts, h.ToFloat(), false)
require.NoError(t, err)
require.Nil(t, chk)
expH = h.ToFloat()
expH.CounterResetHint = histogram.NotCounterReset
exp = append(exp, floatResult{t: ts, h: expH})
@ -170,7 +176,9 @@ func TestFloatHistogramChunkBucketChanges(t *testing.T) {
NegativeBuckets: []int64{1},
}
app.AppendFloatHistogram(ts1, h1.ToFloat())
chk, _, app, err := app.AppendFloatHistogram(nil, ts1, h1.ToFloat(), false)
require.NoError(t, err)
require.Nil(t, chk)
require.Equal(t, 1, c.NumSamples())
// Add a new histogram that has expanded buckets.
@ -196,14 +204,15 @@ func TestFloatHistogramChunkBucketChanges(t *testing.T) {
h2.NegativeBuckets = []int64{2, -1} // 2 1 (total 3)
// This is how span changes will be handled.
hApp, _ := app.(*FloatHistogramAppender)
posInterjections, negInterjections, ok, cr := hApp.Appendable(h2.ToFloat())
posInterjections, negInterjections, ok, cr := hApp.appendable(h2.ToFloat())
require.Greater(t, len(posInterjections), 0)
require.Greater(t, len(negInterjections), 0)
require.True(t, ok) // Only new buckets came in.
require.False(t, cr)
c, app = hApp.Recode(posInterjections, negInterjections, h2.PositiveSpans, h2.NegativeSpans)
app.AppendFloatHistogram(ts2, h2.ToFloat())
c, app = hApp.recode(posInterjections, negInterjections, h2.PositiveSpans, h2.NegativeSpans)
chk, _, _, err = app.AppendFloatHistogram(nil, ts2, h2.ToFloat(), false)
require.NoError(t, err)
require.Nil(t, chk)
require.Equal(t, 2, c.NumSamples())
// Because the 2nd histogram has expanded buckets, we should expect all
@ -230,49 +239,61 @@ func TestFloatHistogramChunkBucketChanges(t *testing.T) {
}
func TestFloatHistogramChunkAppendable(t *testing.T) {
c := Chunk(NewFloatHistogramChunk())
setup := func() (Chunk, *FloatHistogramAppender, int64, *histogram.FloatHistogram) {
c := Chunk(NewFloatHistogramChunk())
// Create fresh appender and add the first histogram.
app, err := c.Appender()
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
// Create fresh appender and add the first histogram.
app, err := c.Appender()
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
ts := int64(1234567890)
h1 := &histogram.FloatHistogram{
Count: 5,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1},
ts := int64(1234567890)
h1 := &histogram.FloatHistogram{
Count: 5,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1},
}
chk, _, app, err := app.AppendFloatHistogram(nil, ts, h1.Copy(), false)
require.NoError(t, err)
require.Nil(t, chk)
require.Equal(t, 1, c.NumSamples())
require.Equal(t, UnknownCounterReset, c.(*FloatHistogramChunk).GetCounterResetHeader())
return c, app.(*FloatHistogramAppender), ts, h1
}
app.AppendFloatHistogram(ts, h1.Copy())
require.Equal(t, 1, c.NumSamples())
hApp, _ := app.(*FloatHistogramAppender)
{ // Schema change.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.Schema++
_, _, ok, _ := hApp.Appendable(h2)
_, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
{ // Zero threshold change.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.ZeroThreshold += 0.1
_, _, ok, _ := hApp.Appendable(h2)
_, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
{ // New histogram that has more buckets.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
@ -285,14 +306,18 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
h2.Sum = 30
h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1}
posInterjections, negInterjections, ok, cr := hApp.Appendable(h2)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Greater(t, len(posInterjections), 0)
require.Equal(t, 0, len(negInterjections))
require.True(t, ok) // Only new buckets came in.
require.False(t, cr)
assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
{ // New histogram that has a bucket missing.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 2},
@ -303,26 +328,32 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
h2.Sum = 21
h2.PositiveBuckets = []float64{6, 3, 2, 4, 5, 1}
posInterjections, negInterjections, ok, cr := hApp.Appendable(h2)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Equal(t, 0, len(posInterjections))
require.Equal(t, 0, len(negInterjections))
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
}
{ // New histogram that has a counter reset while buckets are same.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.Sum = 23
h2.PositiveBuckets = []float64{6, 2, 3, 2, 4, 5, 1}
posInterjections, negInterjections, ok, cr := hApp.Appendable(h2)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Equal(t, 0, len(posInterjections))
require.Equal(t, 0, len(negInterjections))
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
}
{ // New histogram that has a counter reset while new buckets were added.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
@ -333,14 +364,17 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
h2.Sum = 29
h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 0}
posInterjections, negInterjections, ok, cr := hApp.Appendable(h2)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Equal(t, 0, len(posInterjections))
require.Equal(t, 0, len(negInterjections))
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
}
{
c, hApp, ts, h1 := setup()
// New histogram that has a counter reset while new buckets were
// added before the first bucket and a reset on the first bucket (to
// catch the edge case where the new bucket should be forwarded
// ahead until the first old bucket at the start).
h2.Sum = 26
h2.PositiveBuckets = []float64{1, 2, 5, 3, 3, 2, 4, 5, 1}
posInterjections, negInterjections, ok, cr := hApp.Appendable(h2)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Equal(t, 0, len(posInterjections))
require.Equal(t, 0, len(negInterjections))
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
}
}
func assertNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *FloatHistogramAppender, ts int64, h *histogram.FloatHistogram, expectHeader CounterResetHeader) {
oldChunkBytes := oldChunk.Bytes()
newChunk, recoded, newAppender, err := hApp.AppendFloatHistogram(nil, ts, h, false)
require.Equal(t, oldChunkBytes, oldChunk.Bytes()) // Sanity check that previous chunk is untouched.
require.NoError(t, err)
require.NotNil(t, newChunk)
require.False(t, recoded)
require.NotEqual(t, oldChunk, newChunk)
require.Equal(t, expectHeader, newChunk.(*FloatHistogramChunk).GetCounterResetHeader())
require.NotNil(t, newAppender)
require.NotEqual(t, hApp, newAppender)
assertSampleCount(t, newChunk, 1, ValFloatHistogram)
}
func assertNoNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *FloatHistogramAppender, ts int64, h *histogram.FloatHistogram, expectHeader CounterResetHeader) {
oldChunkBytes := oldChunk.Bytes()
newChunk, recoded, newAppender, err := hApp.AppendFloatHistogram(nil, ts, h, false)
require.NotEqual(t, oldChunkBytes, oldChunk.Bytes()) // Sanity check that the sample was appended to the old chunk.
require.NoError(t, err)
require.Nil(t, newChunk)
require.False(t, recoded)
require.Equal(t, expectHeader, oldChunk.(*FloatHistogramChunk).GetCounterResetHeader())
require.NotNil(t, newAppender)
require.Equal(t, hApp, newAppender)
assertSampleCount(t, oldChunk, 2, ValFloatHistogram)
}
func assertRecodedFloatHistogramChunkOnAppend(t *testing.T, prevChunk Chunk, hApp *FloatHistogramAppender, ts int64, h *histogram.FloatHistogram, expectHeader CounterResetHeader) {
prevChunkBytes := prevChunk.Bytes()
newChunk, recoded, newAppender, err := hApp.AppendFloatHistogram(nil, ts, h, false)
require.Equal(t, prevChunkBytes, prevChunk.Bytes()) // Sanity check that previous chunk is untouched. This may change in the future if we implement in-place recoding.
require.NoError(t, err)
require.NotNil(t, newChunk)
require.True(t, recoded)
require.NotEqual(t, prevChunk, newChunk)
require.Equal(t, expectHeader, newChunk.(*FloatHistogramChunk).GetCounterResetHeader())
require.NotNil(t, newAppender)
require.NotEqual(t, hApp, newAppender)
assertSampleCount(t, newChunk, 2, ValFloatHistogram)
}
func TestFloatHistogramChunkAppendableWithEmptySpan(t *testing.T) {
h1 := &histogram.FloatHistogram{
Schema: 0,
@ -412,11 +489,12 @@ func TestFloatHistogramChunkAppendableWithEmptySpan(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
app.AppendFloatHistogram(1, h1)
_, _, _, err = app.AppendFloatHistogram(nil, 1, h1, true)
require.NoError(t, err)
require.Equal(t, 1, c.NumSamples())
hApp, _ := app.(*FloatHistogramAppender)
pI, nI, okToAppend, counterReset := hApp.Appendable(h2)
pI, nI, okToAppend, counterReset := hApp.appendable(h2)
require.Empty(t, pI)
require.Empty(t, nI)
require.True(t, okToAppend)
@ -424,51 +502,62 @@ func TestFloatHistogramChunkAppendableWithEmptySpan(t *testing.T) {
}
func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
c := Chunk(NewFloatHistogramChunk())
setup := func() (Chunk, *FloatHistogramAppender, int64, *histogram.FloatHistogram) {
c := Chunk(NewFloatHistogramChunk())
// Create fresh appender and add the first histogram.
app, err := c.Appender()
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
// Create fresh appender and add the first histogram.
app, err := c.Appender()
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
ts := int64(1234567890)
h1 := &histogram.FloatHistogram{
Count: 5,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1},
ts := int64(1234567890)
h1 := &histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Count: 5,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1},
}
chk, _, app, err := app.AppendFloatHistogram(nil, ts, h1.Copy(), false)
require.NoError(t, err)
require.Nil(t, chk)
require.Equal(t, 1, c.NumSamples())
require.Equal(t, GaugeType, c.(*FloatHistogramChunk).GetCounterResetHeader())
return c, app.(*FloatHistogramAppender), ts, h1
}
app.AppendFloatHistogram(ts, h1.Copy())
require.Equal(t, 1, c.NumSamples())
c.(*FloatHistogramChunk).SetCounterResetHeader(GaugeType)
{ // Schema change.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.Schema++
hApp, _ := app.(*FloatHistogramAppender)
_, _, _, _, _, _, ok := hApp.AppendableGauge(h2)
_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
require.False(t, ok)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
}
{ // Zero threshold change.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.ZeroThreshold += 0.1
hApp, _ := app.(*FloatHistogramAppender)
_, _, _, _, _, _, ok := hApp.AppendableGauge(h2)
_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
require.False(t, ok)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
}
{ // New histogram that has more buckets.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
@ -481,16 +570,18 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
h2.Sum = 30
h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1}
hApp, _ := app.(*FloatHistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
}
{ // New histogram that has buckets missing.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 2},
@ -503,16 +594,18 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
h2.Sum--
h2.PositiveBuckets = []float64{6, 3, 3, 2, 5, 1}
hApp, _ := app.(*FloatHistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Len(t, pI, 0)
require.Len(t, nI, 0)
require.Greater(t, len(pBackwardI), 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
}
{ // New histogram that has a bucket missing and new buckets.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 2},
@ -523,30 +616,34 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
h2.Sum = 21
h2.PositiveBuckets = []float64{6, 3, 2, 4, 5, 1}
hApp, _ := app.(*FloatHistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Greater(t, len(pBackwardI), 0)
require.Len(t, nI, 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
}
{ // New histogram that has a counter reset while buckets are same.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.Sum = 23
h2.PositiveBuckets = []float64{6, 2, 3, 2, 4, 5, 1}
hApp, _ := app.(*FloatHistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Len(t, pI, 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
}
{ // New histogram that has a counter reset while new buckets were added.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
@ -557,18 +654,20 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
h2.Sum = 29
h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 0}
hApp, _ := app.(*FloatHistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
}
{
// New histogram that has a counter reset while new buckets were
// added before the first bucket and a reset on the first bucket.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: -3, Length: 2},
@ -581,12 +680,13 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
h2.Sum = 26
h2.PositiveBuckets = []float64{1, 2, 5, 3, 3, 2, 4, 5, 1}
hApp, _ := app.(*FloatHistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
}
}

View file

@ -15,6 +15,7 @@ package chunkenc
import (
"encoding/binary"
"fmt"
"math"
"github.com/prometheus/prometheus/model/histogram"
@ -88,26 +89,13 @@ const (
UnknownCounterReset CounterResetHeader = 0b00000000
)
// setCounterResetHeader sets the counter reset header of the chunk
// The third byte of the chunk is the counter reset header.
func setCounterResetHeader(h CounterResetHeader, bytes []byte) {
switch h {
case CounterReset, NotCounterReset, GaugeType, UnknownCounterReset:
bytes[2] = (bytes[2] & 0b00111111) | byte(h)
default:
panic("invalid CounterResetHeader type")
}
}
// SetCounterResetHeader sets the counter reset header.
func (c *HistogramChunk) SetCounterResetHeader(h CounterResetHeader) {
setCounterResetHeader(h, c.Bytes())
}
// CounterResetHeaderMask is the mask to get the counter reset header bits.
const CounterResetHeaderMask byte = 0b11000000
// GetCounterResetHeader returns the counter reset header encoded in the first
// 2 bits of the chunk header.
func (c *HistogramChunk) GetCounterResetHeader() CounterResetHeader {
return CounterResetHeader(c.Bytes()[2] & 0b11000000)
return CounterResetHeader(c.Bytes()[2] & CounterResetHeaderMask)
}
// Compact implements the Chunk interface.
@ -177,7 +165,7 @@ func newHistogramIterator(b []byte) *histogramIterator {
// The first 3 bytes contain chunk headers.
// We skip that for actual samples.
_, _ = it.br.readBits(24)
it.counterResetHeader = CounterResetHeader(b[2] & 0b11000000)
it.counterResetHeader = CounterResetHeader(b[2] & CounterResetHeaderMask)
return it
}
@ -224,7 +212,11 @@ type HistogramAppender struct {
}
func (a *HistogramAppender) GetCounterResetHeader() CounterResetHeader {
return CounterResetHeader(a.b.bytes()[2] & 0b11000000)
return CounterResetHeader(a.b.bytes()[2] & CounterResetHeaderMask)
}
func (a *HistogramAppender) setCounterResetHeader(cr CounterResetHeader) {
a.b.bytes()[2] = (a.b.bytes()[2] & (^CounterResetHeaderMask)) | (byte(cr) & CounterResetHeaderMask)
}
func (a *HistogramAppender) NumSamples() int {
@ -237,13 +229,7 @@ func (a *HistogramAppender) Append(int64, float64) {
panic("appended a float sample to a histogram chunk")
}
// AppendFloatHistogram implements Appender. This implementation panics because float
// histogram samples must never be appended to a histogram chunk.
func (a *HistogramAppender) AppendFloatHistogram(int64, *histogram.FloatHistogram) {
panic("appended a float histogram to a histogram chunk")
}
// Appendable returns whether the chunk can be appended to, and if so whether
// appendable returns whether the chunk can be appended to, and if so whether
// any recoding needs to happen using the provided inserts (in case of any new
// buckets, positive or negative range, respectively). If the sample is a gauge
// histogram, appendableGauge must be used instead.
@ -260,7 +246,7 @@ func (a *HistogramAppender) AppendFloatHistogram(int64, *histogram.FloatHistogra
// The method returns an additional boolean set to true if it is not appendable
// because of a counter reset. If the given sample is stale, it is always ok to
// append. If counterReset is true, okToAppend is always false.
func (a *HistogramAppender) Appendable(h *histogram.Histogram) (
func (a *HistogramAppender) appendable(h *histogram.Histogram) (
positiveInserts, negativeInserts []Insert,
okToAppend, counterReset bool,
) {
@ -316,7 +302,7 @@ func (a *HistogramAppender) Appendable(h *histogram.Histogram) (
return
}
// AppendableGauge returns whether the chunk can be appended to, and if so
// appendableGauge returns whether the chunk can be appended to, and if so
// whether:
// 1. Any recoding needs to happen to the chunk using the provided inserts
// (in case of any new buckets, positive or negative range, respectively).
@ -330,7 +316,7 @@ func (a *HistogramAppender) Appendable(h *histogram.Histogram) (
// - The schema has changed.
// - The threshold for the zero bucket has changed.
// - The last sample in the chunk was stale while the current sample is not stale.
func (a *HistogramAppender) AppendableGauge(h *histogram.Histogram) (
func (a *HistogramAppender) appendableGauge(h *histogram.Histogram) (
positiveInserts, negativeInserts []Insert,
backwardPositiveInserts, backwardNegativeInserts []Insert,
positiveSpans, negativeSpans []histogram.Span,
@ -427,11 +413,11 @@ func counterResetInAnyBucket(oldBuckets, newBuckets []int64, oldSpans, newSpans
return false
}
// AppendHistogram appends a histogram to the chunk. The caller must ensure that
// appendHistogram appends a histogram to the chunk. The caller must ensure that
// the histogram is properly structured, e.g. the number of buckets used
// corresponds to the number conveyed by the span structures. First call
// appendable() and act accordingly!
func (a *HistogramAppender) AppendHistogram(t int64, h *histogram.Histogram) {
func (a *HistogramAppender) appendHistogram(t int64, h *histogram.Histogram) {
var tDelta, cntDelta, zCntDelta int64
num := binary.BigEndian.Uint16(a.b.bytes())
@ -540,12 +526,12 @@ func (a *HistogramAppender) AppendHistogram(t int64, h *histogram.Histogram) {
a.sum = h.Sum
}
// Recode converts the current chunk to accommodate an expansion of the set of
// recode converts the current chunk to accommodate an expansion of the set of
// (positive and/or negative) buckets used, according to the provided inserts,
// resulting in the honoring of the provided new positive and negative spans. To
// continue appending, use the returned Appender rather than the receiver of
// this method.
func (a *HistogramAppender) Recode(
func (a *HistogramAppender) recode(
positiveInserts, negativeInserts []Insert,
positiveSpans, negativeSpans []histogram.Span,
) (Chunk, Appender) {
@ -558,8 +544,9 @@ func (a *HistogramAppender) Recode(
hc := NewHistogramChunk()
app, err := hc.Appender()
if err != nil {
panic(err)
panic(err) // This should never happen for an empty histogram chunk.
}
happ := app.(*HistogramAppender)
numPositiveBuckets, numNegativeBuckets := countSpans(positiveSpans), countSpans(negativeSpans)
for it.Next() == ValHistogram {
@ -585,16 +572,16 @@ func (a *HistogramAppender) Recode(
if len(negativeInserts) > 0 {
hOld.NegativeBuckets = insert(hOld.NegativeBuckets, negativeBuckets, negativeInserts, true)
}
app.AppendHistogram(tOld, hOld)
happ.appendHistogram(tOld, hOld)
}
hc.SetCounterResetHeader(CounterResetHeader(byts[2] & 0b11000000))
happ.setCounterResetHeader(CounterResetHeader(byts[2] & CounterResetHeaderMask))
return hc, app
}
// RecodeHistogram converts the current histogram (in-place) to accommodate an
// recodeHistogram converts the current histogram (in-place) to accommodate an
// expansion of the set of (positive and/or negative) buckets used.
func (a *HistogramAppender) RecodeHistogram(
func (a *HistogramAppender) recodeHistogram(
h *histogram.Histogram,
pBackwardInserts, nBackwardInserts []Insert,
) {
@ -612,6 +599,124 @@ func (a *HistogramAppender) writeSumDelta(v float64) {
xorWrite(a.b, v, a.sum, &a.leading, &a.trailing)
}
func (a *HistogramAppender) AppendFloatHistogram(*FloatHistogramAppender, int64, *histogram.FloatHistogram, bool) (Chunk, bool, Appender, error) {
panic("appended a float histogram sample to a histogram chunk")
}
func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h *histogram.Histogram, appendOnly bool) (Chunk, bool, Appender, error) {
if a.NumSamples() == 0 {
a.appendHistogram(t, h)
if h.CounterResetHint == histogram.GaugeType {
a.setCounterResetHeader(GaugeType)
return nil, false, a, nil
}
if prev != nil && h.CounterResetHint != histogram.CounterReset {
// This is a new chunk, but continued from a previous one. We need to calculate the reset header unless already set.
_, _, _, counterReset := prev.appendable(h)
if counterReset {
a.setCounterResetHeader(CounterReset)
} else {
a.setCounterResetHeader(NotCounterReset)
}
} else {
// Honor the explicit counter reset hint.
a.setCounterResetHeader(CounterResetHeader(h.CounterResetHint))
}
return nil, false, a, nil
}
// Adding counter-like histogram.
if h.CounterResetHint != histogram.GaugeType {
pForwardInserts, nForwardInserts, okToAppend, counterReset := a.appendable(h)
if !okToAppend || counterReset {
if appendOnly {
if !okToAppend {
return nil, false, a, fmt.Errorf("histogram schema change")
}
return nil, false, a, fmt.Errorf("histogram counter reset")
}
newChunk := NewHistogramChunk()
app, err := newChunk.Appender()
if err != nil {
panic(err) // This should never happen for an empty histogram chunk.
}
happ := app.(*HistogramAppender)
if counterReset {
happ.setCounterResetHeader(CounterReset)
}
happ.appendHistogram(t, h)
return newChunk, false, app, nil
}
if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
if appendOnly {
return nil, false, a, fmt.Errorf("histogram layout change with %d positive and %d negative forwards inserts", len(pForwardInserts), len(nForwardInserts))
}
chk, app := a.recode(
pForwardInserts, nForwardInserts,
h.PositiveSpans, h.NegativeSpans,
)
app.(*HistogramAppender).appendHistogram(t, h)
return chk, true, app, nil
}
a.appendHistogram(t, h)
return nil, false, a, nil
}
// Adding gauge histogram.
pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, pMergedSpans, nMergedSpans, okToAppend := a.appendableGauge(h)
if !okToAppend {
if appendOnly {
return nil, false, a, fmt.Errorf("gauge histogram schema change")
}
newChunk := NewHistogramChunk()
app, err := newChunk.Appender()
if err != nil {
panic(err) // This should never happen for an empty histogram chunk.
}
happ := app.(*HistogramAppender)
happ.setCounterResetHeader(GaugeType)
happ.appendHistogram(t, h)
return newChunk, false, app, nil
}
if len(pBackwardInserts)+len(nBackwardInserts) > 0 {
if appendOnly {
return nil, false, a, fmt.Errorf("gauge histogram layout change with %d positive and %d negative backwards inserts", len(pBackwardInserts), len(nBackwardInserts))
}
h.PositiveSpans = pMergedSpans
h.NegativeSpans = nMergedSpans
a.recodeHistogram(h, pBackwardInserts, nBackwardInserts)
}
if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
if appendOnly {
return nil, false, a, fmt.Errorf("gauge histogram layout change with %d positive and %d negative forwards inserts", len(pForwardInserts), len(nForwardInserts))
}
chk, app := a.recode(
pForwardInserts, nForwardInserts,
h.PositiveSpans, h.NegativeSpans,
)
app.(*HistogramAppender).appendHistogram(t, h)
return chk, true, app, nil
}
a.appendHistogram(t, h)
return nil, false, a, nil
}
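A hedged sketch of the appendOnly=true path (the helper name is invented, not code from this diff): a caller that manages chunk boundaries itself asks for append-only behavior and cuts a fresh chunk on error, passing the old appender as prev so the new chunk can derive its counter reset header:
// tryAppendOrCut never lets the appender cut or recode on its own: with
// appendOnly=true, a layout change or counter reset surfaces as an error and
// the caller cuts the new chunk explicitly.
func tryAppendOrCut(app *HistogramAppender, t int64, h *histogram.Histogram) (Chunk, Appender, error) {
	if _, _, _, err := app.AppendHistogram(nil, t, h, true); err == nil {
		return nil, app, nil // appended in place
	}
	newChunk := NewHistogramChunk()
	newApp, err := newChunk.Appender()
	if err != nil {
		return nil, nil, err
	}
	// Passing app as prev lets the empty chunk compute its CounterReset/NotCounterReset header.
	if _, _, newApp, err = newApp.AppendHistogram(app, t, h, false); err != nil {
		return nil, nil, err
	}
	return newChunk, newApp, nil
}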
func CounterResetHintToHeader(hint histogram.CounterResetHint) CounterResetHeader {
switch hint {
case histogram.CounterReset:
return CounterReset
case histogram.NotCounterReset:
return NotCounterReset
case histogram.GaugeType:
return GaugeType
default:
return UnknownCounterReset
}
}
type histogramIterator struct {
br bstreamReader
numTotal uint16
@ -715,7 +820,7 @@ func (it *histogramIterator) Reset(b []byte) {
it.numTotal = binary.BigEndian.Uint16(b)
it.numRead = 0
it.counterResetHeader = CounterResetHeader(b[2] & 0b11000000)
it.counterResetHeader = CounterResetHeader(b[2] & CounterResetHeaderMask)
it.t, it.cnt, it.zCnt = 0, 0, 0
it.tDelta, it.cntDelta, it.zCntDelta = 0, 0, 0

View file

@ -14,7 +14,6 @@
package chunkenc
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
@ -55,7 +54,9 @@ func TestHistogramChunkSameBuckets(t *testing.T) {
},
NegativeBuckets: []int64{2, 1, -1, -1}, // counts: 2, 3, 2, 1 (total 8)
}
app.AppendHistogram(ts, h)
chk, _, app, err := app.AppendHistogram(nil, ts, h, false)
require.NoError(t, err)
require.Nil(t, chk)
exp = append(exp, result{t: ts, h: h, fh: h.ToFloat()})
require.Equal(t, 1, c.NumSamples())
@ -67,7 +68,9 @@ func TestHistogramChunkSameBuckets(t *testing.T) {
h.Sum = 24.4
h.PositiveBuckets = []int64{5, -2, 1, -2} // counts: 5, 3, 4, 2 (total 14)
h.NegativeBuckets = []int64{4, -1, 1, -1} // counts: 4, 3, 4, 4 (total 15)
app.AppendHistogram(ts, h)
chk, _, _, err = app.AppendHistogram(nil, ts, h, false)
require.NoError(t, err)
require.Nil(t, chk)
hExp := h.Copy()
hExp.CounterResetHint = histogram.NotCounterReset
exp = append(exp, result{t: ts, h: hExp, fh: hExp.ToFloat()})
@ -84,7 +87,9 @@ func TestHistogramChunkSameBuckets(t *testing.T) {
h.Sum = 24.4
h.PositiveBuckets = []int64{6, 1, -3, 6} // counts: 6, 7, 4, 10 (total 27)
h.NegativeBuckets = []int64{5, 1, -2, 3} // counts: 5, 6, 4, 7 (total 22)
app.AppendHistogram(ts, h)
chk, _, _, err = app.AppendHistogram(nil, ts, h, false)
require.NoError(t, err)
require.Nil(t, chk)
hExp = h.Copy()
hExp.CounterResetHint = histogram.NotCounterReset
exp = append(exp, result{t: ts, h: hExp, fh: hExp.ToFloat()})
@ -182,7 +187,9 @@ func TestHistogramChunkBucketChanges(t *testing.T) {
NegativeBuckets: []int64{1},
}
app.AppendHistogram(ts1, h1)
chk, _, app, err := app.AppendHistogram(nil, ts1, h1, false)
require.NoError(t, err)
require.Nil(t, chk)
require.Equal(t, 1, c.NumSamples())
// Add a new histogram that has expanded buckets.
@ -208,13 +215,15 @@ func TestHistogramChunkBucketChanges(t *testing.T) {
h2.NegativeBuckets = []int64{2, -1} // 2 1 (total 3)
// This is how span changes will be handled.
hApp, _ := app.(*HistogramAppender)
posInterjections, negInterjections, ok, cr := hApp.Appendable(h2)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Greater(t, len(posInterjections), 0)
require.Greater(t, len(negInterjections), 0)
require.True(t, ok) // Only new buckets came in.
require.False(t, cr)
c, app = hApp.Recode(posInterjections, negInterjections, h2.PositiveSpans, h2.NegativeSpans)
app.AppendHistogram(ts2, h2)
c, app = hApp.recode(posInterjections, negInterjections, h2.PositiveSpans, h2.NegativeSpans)
chk, _, _, err = app.AppendHistogram(nil, ts2, h2, false)
require.NoError(t, err)
require.Nil(t, chk)
require.Equal(t, 2, c.NumSamples())
@ -244,49 +253,61 @@ func TestHistogramChunkBucketChanges(t *testing.T) {
}
func TestHistogramChunkAppendable(t *testing.T) {
c := Chunk(NewHistogramChunk())
setup := func() (Chunk, *HistogramAppender, int64, *histogram.Histogram) {
c := Chunk(NewHistogramChunk())
// Create fresh appender and add the first histogram.
app, err := c.Appender()
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
// Create fresh appender and add the first histogram.
app, err := c.Appender()
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
ts := int64(1234567890)
h1 := &histogram.Histogram{
Count: 5,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // counts: 6, 3, 3, 2, 4, 5, 1 (total 24)
ts := int64(1234567890)
h1 := &histogram.Histogram{
Count: 5,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // counts: 6, 3, 3, 2, 4, 5, 1 (total 24)
}
chk, _, app, err := app.AppendHistogram(nil, ts, h1.Copy(), false)
require.NoError(t, err)
require.Nil(t, chk)
require.Equal(t, 1, c.NumSamples())
require.Equal(t, UnknownCounterReset, c.(*HistogramChunk).GetCounterResetHeader())
return c, app.(*HistogramAppender), ts, h1
}
app.AppendHistogram(ts, h1.Copy())
require.Equal(t, 1, c.NumSamples())
hApp, _ := app.(*HistogramAppender)
{ // Schema change.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.Schema++
_, _, ok, _ := hApp.Appendable(h2)
_, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
{ // Zero threshold change.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.ZeroThreshold += 0.1
_, _, ok, _ := hApp.Appendable(h2)
_, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
{ // New histogram that has more buckets.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
@ -302,14 +323,17 @@ func TestHistogramChunkAppendable(t *testing.T) {
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30)
posInterjections, negInterjections, ok, cr := hApp.Appendable(h2)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Greater(t, len(posInterjections), 0)
require.Equal(t, 0, len(negInterjections))
require.True(t, ok) // Only new buckets came in.
require.False(t, cr)
assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
{ // New histogram that has a bucket missing.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 2},
@ -320,26 +344,32 @@ func TestHistogramChunkAppendable(t *testing.T) {
h2.Sum = 21
h2.PositiveBuckets = []int64{6, -3, -1, 2, 1, -4} // counts: 6, 3, 2, 4, 5, 1 (total 21)
posInterjections, negInterjections, ok, cr := hApp.Appendable(h2)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Equal(t, 0, len(posInterjections))
require.Equal(t, 0, len(negInterjections))
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
}
{ // New histogram that has a counter reset while buckets are same.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.Sum = 23
h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // counts: 6, 2, 3, 2, 4, 5, 1 (total 23)
posInterjections, negInterjections, ok, cr := hApp.Appendable(h2)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Equal(t, 0, len(posInterjections))
require.Equal(t, 0, len(negInterjections))
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
}
{ // New histogram that has a counter reset while new buckets were added.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
@ -353,11 +383,13 @@ func TestHistogramChunkAppendable(t *testing.T) {
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 0} // 7 5 1 3 1 0 2 5 5 0 0 (total 29)
posInterjections, negInterjections, ok, cr := hApp.Appendable(h2)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Equal(t, 0, len(posInterjections))
require.Equal(t, 0, len(negInterjections))
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
}
{
@ -365,6 +397,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
// added before the first bucket and reset on first bucket. (This
// catches the edge case where the new buckets must be forwarded
// ahead until the first old bucket at the start.)
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: -3, Length: 2},
@ -380,14 +413,55 @@ func TestHistogramChunkAppendable(t *testing.T) {
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.PositiveBuckets = []int64{1, 1, 3, -2, 0, -1, 2, 1, -4} // counts: 1, 2, 5, 3, 3, 2, 4, 5, 1 (total 26)
posInterjections, negInterjections, ok, cr := hApp.Appendable(h2)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Equal(t, 0, len(posInterjections))
require.Equal(t, 0, len(negInterjections))
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
}
}
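
Editor's note: the PositiveBuckets/NegativeBuckets slices throughout these tests are delta-encoded; each element is the difference from the previous bucket's absolute count, which is what the inline "counts: ..." comments decode by hand. A standalone sketch of that decoding (helper name is ours, not from this commit):

	package main

	import "fmt"

	// decodeDeltas expands delta-encoded bucket values into absolute counts.
	func decodeDeltas(deltas []int64) []int64 {
		counts := make([]int64, 0, len(deltas))
		var cur int64
		for _, d := range deltas {
			cur += d
			counts = append(counts, cur)
		}
		return counts
	}

	func main() {
		// Matches the comment "counts: 6, 3, 3, 2, 4, 5, 1 (total 24)" above.
		fmt.Println(decodeDeltas([]int64{6, -3, 0, -1, 2, 1, -4}))
	}
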
func assertNewHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader) {
oldChunkBytes := oldChunk.Bytes()
newChunk, recoded, newAppender, err := hApp.AppendHistogram(nil, ts, h, false)
require.Equal(t, oldChunkBytes, oldChunk.Bytes()) // Sanity check that previous chunk is untouched.
require.NoError(t, err)
require.NotNil(t, newChunk)
require.False(t, recoded)
require.NotEqual(t, oldChunk, newChunk)
require.Equal(t, expectHeader, newChunk.(*HistogramChunk).GetCounterResetHeader())
require.NotNil(t, newAppender)
require.NotEqual(t, hApp, newAppender)
assertSampleCount(t, newChunk, 1, ValHistogram)
}
func assertRecodedHistogramChunkOnAppend(t *testing.T, prevChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader) {
prevChunkBytes := prevChunk.Bytes()
newChunk, recoded, newAppender, err := hApp.AppendHistogram(nil, ts, h, false)
require.Equal(t, prevChunkBytes, prevChunk.Bytes()) // Sanity check that previous chunk is untouched. This may change in the future if we implement in-place recoding.
require.NoError(t, err)
require.NotNil(t, newChunk)
require.True(t, recoded)
require.NotEqual(t, prevChunk, newChunk)
require.Equal(t, expectHeader, newChunk.(*HistogramChunk).GetCounterResetHeader())
require.NotNil(t, newAppender)
require.NotEqual(t, hApp, newAppender)
assertSampleCount(t, newChunk, 2, ValHistogram)
}
func assertSampleCount(t *testing.T, c Chunk, exp int64, vtype ValueType) {
count := int64(0)
it := c.Iterator(nil)
require.NoError(t, it.Err())
for it.Next() == vtype {
count++
}
require.NoError(t, it.Err())
require.Equal(t, exp, count)
}
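
Editor's note: the two assertion helpers above pin down the contract of the widened AppendHistogram signature. Read as a caller-side sketch (in-package test code; names taken from the helpers):

	newChunk, recoded, newApp, err := hApp.AppendHistogram(nil, ts, h, false)
	require.NoError(t, err)
	switch {
	case newChunk == nil:
		// The sample fit the current chunk; keep appending with the same appender.
	case recoded:
		// Same logical chunk, rewritten with a wider bucket layout; the old
		// chunk object is replaced but no new chunk is counted.
	default:
		// A genuinely new chunk was cut (counter reset, schema change, ...);
		// newApp now appends to it and the old chunk is complete.
	}
	_ = newApp
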
func TestHistogramChunkAppendableWithEmptySpan(t *testing.T) {
h1 := &histogram.Histogram{
Schema: 0,
@ -435,11 +509,12 @@ func TestHistogramChunkAppendableWithEmptySpan(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
app.AppendHistogram(1, h1)
_, _, _, err = app.AppendHistogram(nil, 1, h1, true)
require.NoError(t, err)
require.Equal(t, 1, c.NumSamples())
hApp, _ := app.(*HistogramAppender)
pI, nI, okToAppend, counterReset := hApp.Appendable(h2)
pI, nI, okToAppend, counterReset := hApp.appendable(h2)
require.Empty(t, pI)
require.Empty(t, nI)
require.True(t, okToAppend)
@ -573,11 +648,9 @@ func TestAtFloatHistogram(t *testing.T) {
app, err := chk.Appender()
require.NoError(t, err)
for i := range input {
if i > 0 {
_, _, okToAppend, _ := app.(*HistogramAppender).Appendable(&input[i])
require.True(t, okToAppend, fmt.Sprintf("idx: %d", i))
}
app.AppendHistogram(int64(i), &input[i])
newc, _, _, err := app.AppendHistogram(nil, int64(i), &input[i], false)
require.NoError(t, err)
require.Nil(t, newc)
}
it := chk.Iterator(nil)
i := int64(0)
@ -590,51 +663,75 @@ func TestAtFloatHistogram(t *testing.T) {
}
func TestHistogramChunkAppendableGauge(t *testing.T) {
c := Chunk(NewHistogramChunk())
setup := func() (Chunk, *HistogramAppender, int64, *histogram.Histogram) {
c := Chunk(NewHistogramChunk())
// Create fresh appender and add the first histogram.
app, err := c.Appender()
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
// Create fresh appender and add the first histogram.
app, err := c.Appender()
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
ts := int64(1234567890)
h1 := &histogram.Histogram{
Count: 5,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // {6, 3, 3, 2, 4, 5, 1}
ts := int64(1234567890)
h1 := &histogram.Histogram{
CounterResetHint: histogram.GaugeType,
Count: 5,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // {6, 3, 3, 2, 4, 5, 1}
}
chk, _, app, err := app.AppendHistogram(nil, ts, h1.Copy(), false)
require.NoError(t, err)
require.Nil(t, chk)
require.Equal(t, 1, c.NumSamples())
require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
return c, app.(*HistogramAppender), ts, h1
}
app.AppendHistogram(ts, h1.Copy())
require.Equal(t, 1, c.NumSamples())
c.(*HistogramChunk).SetCounterResetHeader(GaugeType)
{ // Schema change.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.Schema++
hApp, _ := app.(*HistogramAppender)
_, _, _, _, _, _, ok := hApp.AppendableGauge(h2)
_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
require.False(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)
require.NoError(t, err)
require.NotNil(t, newc)
require.False(t, recoded)
require.NotEqual(t, c, newc)
require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
require.Equal(t, GaugeType, newc.(*HistogramChunk).GetCounterResetHeader())
}
{ // Zero threshold change.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.ZeroThreshold += 0.1
hApp, _ := app.(*HistogramAppender)
_, _, _, _, _, _, ok := hApp.AppendableGauge(h2)
_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
require.False(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)
require.NoError(t, err)
require.NotNil(t, newc)
require.False(t, recoded)
require.NotEqual(t, c, newc)
require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
require.Equal(t, GaugeType, newc.(*HistogramChunk).GetCounterResetHeader())
}
{ // New histogram that has more buckets.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
@ -647,16 +744,22 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
h2.Sum = 30
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // {7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1}
hApp, _ := app.(*HistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)
require.NoError(t, err)
require.NotNil(t, newc)
require.True(t, recoded)
require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
}
{ // New histogram that has buckets missing.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 2},
@ -669,16 +772,22 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
h2.Sum--
h2.PositiveBuckets = []int64{6, -3, 0, -1, 3, -4} // {6, 3, 3, 2, 5, 1}
hApp, _ := app.(*HistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Len(t, pI, 0)
require.Len(t, nI, 0)
require.Greater(t, len(pBackwardI), 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)
require.NoError(t, err)
require.Nil(t, newc)
require.False(t, recoded)
require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
}
{ // New histogram that has a bucket missing and new buckets.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 2},
@ -689,30 +798,42 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
h2.Sum = 21
h2.PositiveBuckets = []int64{6, -3, -1, 2, 1, -4} // {6, 3, 2, 4, 5, 1}
hApp, _ := app.(*HistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Greater(t, len(pBackwardI), 0)
require.Len(t, nI, 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)
require.NoError(t, err)
require.NotNil(t, newc)
require.True(t, recoded)
require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
}
{ // New histogram that has a counter reset while buckets are same.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.Sum = 23
h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // {6, 2, 3, 2, 4, 5, 1}
hApp, _ := app.(*HistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Len(t, pI, 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)
require.NoError(t, err)
require.Nil(t, newc)
require.False(t, recoded)
require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
}
{ // New histogram that has a counter reset while new buckets were added.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
@ -723,18 +844,24 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
h2.Sum = 29
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 0} // {7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 0}
hApp, _ := app.(*HistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)
require.NoError(t, err)
require.NotNil(t, newc)
require.True(t, recoded)
require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
}
{
// New histogram that has a counter reset while new buckets were
// added before the first bucket and reset on first bucket.
c, hApp, ts, h1 := setup()
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: -3, Length: 2},
@ -747,12 +874,17 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
h2.Sum = 26
h2.PositiveBuckets = []int64{1, 1, 3, -2, 0, -1, 2, 1, -4} // {1, 2, 5, 3, 3, 2, 4, 5, 1}
hApp, _ := app.(*HistogramAppender)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.AppendableGauge(h2)
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.True(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)
require.NoError(t, err)
require.NotNil(t, newc)
require.True(t, recoded)
require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
}
}
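
Editor's note: taken together, the gauge cases above show how the four interjection lists drive the append path. A condensed sketch of the semantics as exercised by these assertions (in-package only, since appendableGauge is unexported):

	pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
	switch {
	case !ok:
		// Schema or zero-threshold changed: a brand-new gauge chunk is cut.
	case len(pI)+len(nI) > 0:
		// The incoming histogram has buckets the chunk lacks: all prior
		// samples are recoded into a new chunk object (recoded=true).
	case len(pBackwardI)+len(nBackwardI) > 0:
		// Only the incoming histogram is missing buckets: it is expanded
		// in place and appended to the existing chunk (recoded=false).
	default:
		// Layouts match; note that for gauges a count drop is not a reset.
	}
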

View file

@ -152,14 +152,6 @@ type xorAppender struct {
trailing uint8
}
func (a *xorAppender) AppendHistogram(int64, *histogram.Histogram) {
panic("appended a histogram to an xor chunk")
}
func (a *xorAppender) AppendFloatHistogram(int64, *histogram.FloatHistogram) {
panic("appended a float histogram to an xor chunk")
}
func (a *xorAppender) Append(t int64, v float64) {
var tDelta uint64
num := binary.BigEndian.Uint16(a.b.bytes())
@ -228,6 +220,14 @@ func (a *xorAppender) writeVDelta(v float64) {
xorWrite(a.b, v, a.v, &a.leading, &a.trailing)
}
func (a *xorAppender) AppendHistogram(*HistogramAppender, int64, *histogram.Histogram, bool) (Chunk, bool, Appender, error) {
panic("appended a histogram sample to a float chunk")
}
func (a *xorAppender) AppendFloatHistogram(*FloatHistogramAppender, int64, *histogram.FloatHistogram, bool) (Chunk, bool, Appender, error) {
panic("appended a float histogram sample to a float chunk")
}
type xorIterator struct {
br bstreamReader
numTotal uint16

View file

@ -2720,14 +2720,20 @@ func TestDBQueryDoesntSeeAppendsAfterCreation(t *testing.T) {
require.Equal(t, map[string][]sample{`{foo="bar"}`: {{t: 0, f: 0}}}, seriesSet)
}
func assureChunkFromSamples(t *testing.T, samples []tsdbutil.Sample) chunks.Meta {
chks, err := tsdbutil.ChunkFromSamples(samples)
require.NoError(t, err)
return chks
}
// TestChunkWriter_ReadAfterWrite ensures that chunk segment are cut at the set segment size and
// that the resulted segments includes the expected chunks data.
func TestChunkWriter_ReadAfterWrite(t *testing.T) {
chk1 := tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 1, nil, nil}})
chk2 := tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 2, nil, nil}})
chk3 := tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 3, nil, nil}})
chk4 := tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 4, nil, nil}})
chk5 := tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 5, nil, nil}})
chk1 := assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 1, nil, nil}})
chk2 := assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 2, nil, nil}})
chk3 := assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 3, nil, nil}})
chk4 := assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 4, nil, nil}})
chk5 := assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 5, nil, nil}})
chunkSize := len(chk1.Chunk.Bytes()) + chunks.MaxChunkLengthFieldSize + chunks.ChunkEncodingSize + crc32.Size
tests := []struct {
@ -2927,11 +2933,11 @@ func TestRangeForTimestamp(t *testing.T) {
// Regression test for https://github.com/prometheus/prometheus/pull/6514.
func TestChunkReader_ConcurrentReads(t *testing.T) {
chks := []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 1, nil, nil}}),
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 2, nil, nil}}),
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 3, nil, nil}}),
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 4, nil, nil}}),
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 5, nil, nil}}),
assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 1, nil, nil}}),
assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 2, nil, nil}}),
assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 3, nil, nil}}),
assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 4, nil, nil}}),
assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 5, nil, nil}}),
}
tempDir := t.TempDir()

View file

@ -365,8 +365,8 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp
if prev := ce.exemplars[ce.nextIndex]; prev == nil {
ce.exemplars[ce.nextIndex] = &circularBufferEntry{}
} else {
// There exists exemplar already on this ce.nextIndex entry, drop it, to make place
// for others.
// There exists an exemplar already on this ce.nextIndex entry,
// drop it, to make place for others.
var buf [1024]byte
prevLabels := prev.ref.seriesLabels.Bytes(buf[:])
if prev.next == noExemplar {

View file

@ -1162,87 +1162,32 @@ func (s *memSeries) append(t int64, v float64, appendID uint64, o chunkOpts) (sa
// appendHistogram adds the histogram.
// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
// In case of recoding the existing chunk, a new chunk is allocated and the old chunk is dropped.
// To keep the meaning of prometheus_tsdb_head_chunks and prometheus_tsdb_head_chunks_created_total
// consistent, we return chunkCreated=false in this case.
func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID uint64, o chunkOpts) (sampleInOrder, chunkCreated bool) {
// Head controls the execution of recoding, so that we own the proper
// chunk reference afterwards. We check for Appendable from appender before
// appendPreprocessor because in case it ends up creating a new chunk,
// we need to know if there was also a counter reset or not to set the
// meta properly.
app, _ := s.app.(*chunkenc.HistogramAppender)
var (
pForwardInserts, nForwardInserts []chunkenc.Insert
pBackwardInserts, nBackwardInserts []chunkenc.Insert
pMergedSpans, nMergedSpans []histogram.Span
okToAppend, counterReset, gauge bool
)
// chunk reference afterwards and mmap used up chunks.
// Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway.
prevApp, _ := s.app.(*chunkenc.HistogramAppender)
c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, o)
if !sampleInOrder {
return sampleInOrder, chunkCreated
}
switch h.CounterResetHint {
case histogram.GaugeType:
gauge = true
if app != nil {
pForwardInserts, nForwardInserts,
pBackwardInserts, nBackwardInserts,
pMergedSpans, nMergedSpans,
okToAppend = app.AppendableGauge(h)
}
case histogram.CounterReset:
// The caller tells us this is a counter reset, even if it
// doesn't look like one.
counterReset = true
default:
if app != nil {
pForwardInserts, nForwardInserts, okToAppend, counterReset = app.Appendable(h)
}
}
var (
newChunk chunkenc.Chunk
recoded bool
)
if !chunkCreated {
if len(pBackwardInserts)+len(nBackwardInserts) > 0 {
h.PositiveSpans = pMergedSpans
h.NegativeSpans = nMergedSpans
app.RecodeHistogram(h, pBackwardInserts, nBackwardInserts)
}
// We have 3 cases here
// - !okToAppend or counterReset -> We need to cut a new chunk.
// - okToAppend but we have inserts → Existing chunk needs
// recoding before we can append our histogram.
// - okToAppend and no inserts → Chunk is ready to support our histogram.
switch {
case !okToAppend || counterReset:
c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, o.chunkDiskMapper, o.chunkRange)
chunkCreated = true
case len(pForwardInserts) > 0 || len(nForwardInserts) > 0:
// New buckets have appeared. We need to recode all
// prior histogram samples within the chunk before we
// can process this one.
chunk, app := app.Recode(
pForwardInserts, nForwardInserts,
h.PositiveSpans, h.NegativeSpans,
)
c.chunk = chunk
s.app = app
}
// Ignore the previous appender if we continue the current chunk.
prevApp = nil
}
if chunkCreated {
hc := s.headChunk.chunk.(*chunkenc.HistogramChunk)
header := chunkenc.UnknownCounterReset
switch {
case gauge:
header = chunkenc.GaugeType
case counterReset:
header = chunkenc.CounterReset
case okToAppend:
header = chunkenc.NotCounterReset
}
hc.SetCounterResetHeader(header)
}
s.app.AppendHistogram(t, h)
c.maxTime = t
newChunk, recoded, s.app, _ = s.app.AppendHistogram(prevApp, t, h, false) // false=request a new chunk if needed
s.lastHistogramValue = h
s.lastFloatHistogramValue = nil
@ -1251,101 +1196,84 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui
s.txs.add(appendID)
}
return true, chunkCreated
if newChunk == nil { // Sample was appended to existing chunk or is the first sample in a new chunk.
c.maxTime = t
return true, chunkCreated
}
if recoded { // The appender needed to recode the chunk.
c.maxTime = t
c.chunk = newChunk
return true, false
}
// This is a brand new chunk, switch out the head chunk (based on cutNewHeadChunk).
s.mmapCurrentHeadChunk(o.chunkDiskMapper)
s.headChunk = &memChunk{
chunk: newChunk,
minTime: t,
maxTime: t,
}
s.nextAt = rangeForTimestamp(t, o.chunkRange)
return true, true
}
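
Editor's note: the doc comment's point about the two head metrics is easiest to see from the caller's side. A sketch with assumed metric field names (the real Head wiring may differ; the float variant below follows the same contract):

	ok, chunkCreated := s.appendHistogram(t, h, appendID, opts)
	if ok && chunkCreated {
		head.metrics.chunks.Inc()        // prometheus_tsdb_head_chunks
		head.metrics.chunksCreated.Inc() // prometheus_tsdb_head_chunks_created_total
	}
	// Recoding allocates a fresh chunk object to replace the old one, but it
	// is still the same logical chunk, hence chunkCreated=false in that path.
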
// appendFloatHistogram adds the float histogram.
// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
// In case of recoding the existing chunk, a new chunk is allocated and the old chunk is dropped.
// To keep the meaning of prometheus_tsdb_head_chunks and prometheus_tsdb_head_chunks_created_total
// consistent, we return chunkCreated=false in this case.
func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, appendID uint64, o chunkOpts) (sampleInOrder, chunkCreated bool) {
// Head controls the execution of recoding, so that we own the proper
// chunk reference afterwards. We check for Appendable from appender before
// appendPreprocessor because in case it ends up creating a new chunk,
// we need to know if there was also a counter reset or not to set the
// meta properly.
app, _ := s.app.(*chunkenc.FloatHistogramAppender)
var (
pForwardInserts, nForwardInserts []chunkenc.Insert
pBackwardInserts, nBackwardInserts []chunkenc.Insert
pMergedSpans, nMergedSpans []histogram.Span
okToAppend, counterReset, gauge bool
)
// chunk reference afterwards and mmap used up chunks.
// Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway.
prevApp, _ := s.app.(*chunkenc.FloatHistogramAppender)
c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, o)
if !sampleInOrder {
return sampleInOrder, chunkCreated
}
switch fh.CounterResetHint {
case histogram.GaugeType:
gauge = true
if app != nil {
pForwardInserts, nForwardInserts,
pBackwardInserts, nBackwardInserts,
pMergedSpans, nMergedSpans,
okToAppend = app.AppendableGauge(fh)
}
case histogram.CounterReset:
// The caller tells us this is a counter reset, even if it
// doesn't look like one.
counterReset = true
default:
if app != nil {
pForwardInserts, nForwardInserts, okToAppend, counterReset = app.Appendable(fh)
}
}
var (
newChunk chunkenc.Chunk
recoded bool
)
if !chunkCreated {
if len(pBackwardInserts)+len(nBackwardInserts) > 0 {
fh.PositiveSpans = pMergedSpans
fh.NegativeSpans = nMergedSpans
app.RecodeHistogramm(fh, pBackwardInserts, nBackwardInserts)
}
// We have 3 cases here
// - !okToAppend or counterReset -> We need to cut a new chunk.
// - okToAppend but we have inserts → Existing chunk needs
// recoding before we can append our histogram.
// - okToAppend and no inserts → Chunk is ready to support our histogram.
switch {
case !okToAppend || counterReset:
c = s.cutNewHeadChunk(t, chunkenc.EncFloatHistogram, o.chunkDiskMapper, o.chunkRange)
chunkCreated = true
case len(pForwardInserts) > 0 || len(nForwardInserts) > 0:
// New buckets have appeared. We need to recode all
// prior histogram samples within the chunk before we
// can process this one.
chunk, app := app.Recode(
pForwardInserts, nForwardInserts,
fh.PositiveSpans, fh.NegativeSpans,
)
c.chunk = chunk
s.app = app
}
// Ignore the previous appender if we continue the current chunk.
prevApp = nil
}
if chunkCreated {
hc := s.headChunk.chunk.(*chunkenc.FloatHistogramChunk)
header := chunkenc.UnknownCounterReset
switch {
case gauge:
header = chunkenc.GaugeType
case counterReset:
header = chunkenc.CounterReset
case okToAppend:
header = chunkenc.NotCounterReset
}
hc.SetCounterResetHeader(header)
}
newChunk, recoded, s.app, _ = s.app.AppendFloatHistogram(prevApp, t, fh, false) // False means request a new chunk if needed.
s.app.AppendFloatHistogram(t, fh)
c.maxTime = t
s.lastFloatHistogramValue = fh
s.lastHistogramValue = nil
s.lastFloatHistogramValue = fh
if appendID > 0 {
s.txs.add(appendID)
}
return true, chunkCreated
if newChunk == nil { // Sample was appended to existing chunk or is the first sample in a new chunk.
c.maxTime = t
return true, chunkCreated
}
if recoded { // The appender needed to recode the chunk.
c.maxTime = t
c.chunk = newChunk
return true, false
}
// This is a brand new chunk, switch out the head chunk (based on cutNewHeadChunk).
s.mmapCurrentHeadChunk(o.chunkDiskMapper)
s.headChunk = &memChunk{
chunk: newChunk,
minTime: t,
maxTime: t,
}
s.nextAt = rangeForTimestamp(t, o.chunkRange)
return true, true
}
// appendPreprocessor takes care of cutting new chunks and m-mapping old chunks.

View file

@ -3908,7 +3908,7 @@ func TestHistogramCounterResetHeader(t *testing.T) {
appendHistogram(h)
checkExpCounterResetHeader(chunkenc.CounterReset)
// Add 2 non-counter reset histograms.
// Add 2 non-counter reset histogram chunks.
for i := 0; i < 250; i++ {
appendHistogram(h)
}

View file

@ -874,74 +874,17 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
break
}
switch hc := p.currChkMeta.Chunk.(type) {
case *chunkenc.HistogramChunk:
newChunk.(*chunkenc.HistogramChunk).SetCounterResetHeader(hc.GetCounterResetHeader())
case *safeHeadChunk:
if unwrapped, ok := hc.Chunk.(*chunkenc.HistogramChunk); ok {
newChunk.(*chunkenc.HistogramChunk).SetCounterResetHeader(unwrapped.GetCounterResetHeader())
} else {
err = fmt.Errorf("internal error, could not unwrap safeHeadChunk to histogram chunk: %T", hc.Chunk)
}
default:
err = fmt.Errorf("internal error, unknown chunk type %T when expecting histogram", p.currChkMeta.Chunk)
}
if err != nil {
break
}
var h *histogram.Histogram
t, h = p.currDelIter.AtHistogram()
p.curr.MinTime = t
// Detect missing gauge reset hint.
if h.CounterResetHint == histogram.GaugeType && newChunk.(*chunkenc.HistogramChunk).GetCounterResetHeader() != chunkenc.GaugeType {
err = fmt.Errorf("found gauge histogram in non gauge chunk")
break
}
app.AppendHistogram(t, h)
for vt := p.currDelIter.Next(); vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
for vt := valueType; vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
if vt != chunkenc.ValHistogram {
err = fmt.Errorf("found value type %v in histogram chunk", vt)
break
}
var h *histogram.Histogram
t, h = p.currDelIter.AtHistogram()
// Defend against corrupted chunks.
if h.CounterResetHint == histogram.GaugeType {
pI, nI, bpI, bnI, _, _, okToAppend := app.(*chunkenc.HistogramAppender).AppendableGauge(h)
if !okToAppend {
err = errors.New("unable to append histogram due to unexpected schema change")
break
}
if len(pI)+len(nI)+len(bpI)+len(bnI) > 0 {
err = fmt.Errorf(
"bucket layout has changed unexpectedly: forward %d positive, %d negative, backward %d positive %d negative bucket interjections required",
len(pI), len(nI), len(bpI), len(bnI),
)
break
}
} else {
pI, nI, okToAppend, counterReset := app.(*chunkenc.HistogramAppender).Appendable(h)
if len(pI)+len(nI) > 0 {
err = fmt.Errorf(
"bucket layout has changed unexpectedly: %d positive and %d negative bucket interjections required",
len(pI), len(nI),
)
break
}
if counterReset {
err = errors.New("detected unexpected counter reset in histogram")
break
}
if !okToAppend {
err = errors.New("unable to append histogram due to unexpected schema change")
break
}
_, _, app, err = app.AppendHistogram(nil, t, h, true)
if err != nil {
break
}
app.AppendHistogram(t, h)
}
case chunkenc.ValFloat:
newChunk = chunkenc.NewXORChunk()
@ -966,75 +909,17 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
break
}
switch hc := p.currChkMeta.Chunk.(type) {
case *chunkenc.FloatHistogramChunk:
newChunk.(*chunkenc.FloatHistogramChunk).SetCounterResetHeader(hc.GetCounterResetHeader())
case *safeHeadChunk:
if unwrapped, ok := hc.Chunk.(*chunkenc.FloatHistogramChunk); ok {
newChunk.(*chunkenc.FloatHistogramChunk).SetCounterResetHeader(unwrapped.GetCounterResetHeader())
} else {
err = fmt.Errorf("internal error, could not unwrap safeHeadChunk to float histogram chunk: %T", hc.Chunk)
}
default:
err = fmt.Errorf("internal error, unknown chunk type %T when expecting float histogram", p.currChkMeta.Chunk)
}
if err != nil {
break
}
var h *histogram.FloatHistogram
t, h = p.currDelIter.AtFloatHistogram()
p.curr.MinTime = t
// Detect missing gauge reset hint.
if h.CounterResetHint == histogram.GaugeType && newChunk.(*chunkenc.FloatHistogramChunk).GetCounterResetHeader() != chunkenc.GaugeType {
err = fmt.Errorf("found float gauge histogram in non gauge chunk")
break
}
app.AppendFloatHistogram(t, h)
for vt := p.currDelIter.Next(); vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
for vt := valueType; vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
if vt != chunkenc.ValFloatHistogram {
err = fmt.Errorf("found value type %v in histogram chunk", vt)
break
}
var h *histogram.FloatHistogram
t, h = p.currDelIter.AtFloatHistogram()
// Defend against corrupted chunks.
if h.CounterResetHint == histogram.GaugeType {
pI, nI, bpI, bnI, _, _, okToAppend := app.(*chunkenc.FloatHistogramAppender).AppendableGauge(h)
if !okToAppend {
err = errors.New("unable to append histogram due to unexpected schema change")
break
}
if len(pI)+len(nI)+len(bpI)+len(bnI) > 0 {
err = fmt.Errorf(
"bucket layout has changed unexpectedly: forward %d positive, %d negative, backward %d positive %d negative bucket interjections required",
len(pI), len(nI), len(bpI), len(bnI),
)
break
}
} else {
pI, nI, okToAppend, counterReset := app.(*chunkenc.FloatHistogramAppender).Appendable(h)
if len(pI)+len(nI) > 0 {
err = fmt.Errorf(
"bucket layout has changed unexpectedly: %d positive and %d negative bucket interjections required",
len(pI), len(nI),
)
break
}
if counterReset {
err = errors.New("detected unexpected counter reset in histogram")
break
}
if !okToAppend {
err = errors.New("unable to append histogram due to unexpected schema change")
break
}
_, _, app, err = app.AppendFloatHistogram(nil, t, h, true)
if err != nil {
break
}
app.AppendFloatHistogram(t, h)
}
default:
err = fmt.Errorf("populateWithDelChunkSeriesIterator: value type %v unsupported", valueType)

View file

@ -710,7 +710,7 @@ func createFakeReaderAndNotPopulatedChunks(s ...[]tsdbutil.Sample) (*fakeChunksR
chks := make([]chunks.Meta, 0, len(s))
for ref, samples := range s {
chk := tsdbutil.ChunkFromSamples(samples)
chk, _ := tsdbutil.ChunkFromSamples(samples)
f.chks[chunks.ChunkRef(ref)] = chk.Chunk
chks = append(chks, chunks.Meta{
@ -753,7 +753,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
chks: [][]tsdbutil.Sample{{}},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{}),
assureChunkFromSamples(t, []tsdbutil.Sample{}),
},
},
{
@ -761,9 +761,9 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
chks: [][]tsdbutil.Sample{{}, {}, {}},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{}),
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{}),
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{}),
assureChunkFromSamples(t, []tsdbutil.Sample{}),
assureChunkFromSamples(t, []tsdbutil.Sample{}),
assureChunkFromSamples(t, []tsdbutil.Sample{}),
},
},
{
@ -776,7 +776,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
assureChunkFromSamples(t, []tsdbutil.Sample{
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
}),
},
@ -792,10 +792,10 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
assureChunkFromSamples(t, []tsdbutil.Sample{
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
}),
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
assureChunkFromSamples(t, []tsdbutil.Sample{
sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
}),
},
@ -812,13 +812,13 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil}, sample{10, 22, nil, nil}, sample{203, 3493, nil, nil},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
assureChunkFromSamples(t, []tsdbutil.Sample{
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
}),
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
assureChunkFromSamples(t, []tsdbutil.Sample{
sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
}),
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
assureChunkFromSamples(t, []tsdbutil.Sample{
sample{10, 22, nil, nil}, sample{203, 3493, nil, nil},
}),
},
@ -885,10 +885,10 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
assureChunkFromSamples(t, []tsdbutil.Sample{
sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
}),
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
assureChunkFromSamples(t, []tsdbutil.Sample{
sample{7, 89, nil, nil},
}),
},
@ -905,10 +905,10 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{1, 2, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
assureChunkFromSamples(t, []tsdbutil.Sample{
sample{1, 2, nil, nil}, sample{6, 1, nil, nil},
}),
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
assureChunkFromSamples(t, []tsdbutil.Sample{
sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
}),
},
@ -925,10 +925,10 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{9, 8, nil, nil},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
assureChunkFromSamples(t, []tsdbutil.Sample{
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil},
}),
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
assureChunkFromSamples(t, []tsdbutil.Sample{
sample{9, 8, nil, nil},
}),
},
@ -965,7 +965,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
assureChunkFromSamples(t, []tsdbutil.Sample{
sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
@ -990,7 +990,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
assureChunkFromSamples(t, []tsdbutil.Sample{
sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
@ -1014,7 +1014,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
assureChunkFromSamples(t, []tsdbutil.Sample{
sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
@ -1039,7 +1039,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
assureChunkFromSamples(t, []tsdbutil.Sample{
sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
@ -1063,7 +1063,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
assureChunkFromSamples(t, []tsdbutil.Sample{
sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
@ -1088,7 +1088,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
assureChunkFromSamples(t, []tsdbutil.Sample{
sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
@ -1112,7 +1112,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
assureChunkFromSamples(t, []tsdbutil.Sample{
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
@ -1137,7 +1137,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
assureChunkFromSamples(t, []tsdbutil.Sample{
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},

View file

@ -40,12 +40,13 @@ func (s SampleSlice) Get(i int) Sample { return s[i] }
func (s SampleSlice) Len() int { return len(s) }
// ChunkFromSamples requires all samples to have the same type.
func ChunkFromSamples(s []Sample) chunks.Meta {
func ChunkFromSamples(s []Sample) (chunks.Meta, error) {
return ChunkFromSamplesGeneric(SampleSlice(s))
}
// ChunkFromSamplesGeneric requires all samples to have the same type.
func ChunkFromSamplesGeneric(s Samples) chunks.Meta {
func ChunkFromSamplesGeneric(s Samples) (chunks.Meta, error) {
emptyChunk := chunks.Meta{Chunk: chunkenc.NewXORChunk()}
mint, maxt := int64(0), int64(0)
if s.Len() > 0 {
@ -53,36 +54,37 @@ func ChunkFromSamplesGeneric(s Samples) chunks.Meta {
}
if s.Len() == 0 {
return chunks.Meta{
Chunk: chunkenc.NewXORChunk(),
}
return emptyChunk, nil
}
sampleType := s.Get(0).Type()
c, err := chunkenc.NewEmptyChunk(sampleType.ChunkEncoding())
if err != nil {
panic(err) // TODO(codesome): dont panic.
return chunks.Meta{}, err
}
ca, _ := c.Appender()
var newChunk chunkenc.Chunk
for i := 0; i < s.Len(); i++ {
switch sampleType {
case chunkenc.ValFloat:
ca.Append(s.Get(i).T(), s.Get(i).F())
case chunkenc.ValHistogram:
h := s.Get(i).H()
ca.AppendHistogram(s.Get(i).T(), h)
if i == 0 && h.CounterResetHint == histogram.GaugeType {
hc := c.(*chunkenc.HistogramChunk)
hc.SetCounterResetHeader(chunkenc.GaugeType)
newChunk, _, ca, err = ca.AppendHistogram(nil, s.Get(i).T(), s.Get(i).H(), false)
if err != nil {
return emptyChunk, err
}
if newChunk != nil {
return emptyChunk, fmt.Errorf("did not expect to start a second chunk")
}
case chunkenc.ValFloatHistogram:
fh := s.Get(i).FH()
ca.AppendFloatHistogram(s.Get(i).T(), fh)
if i == 0 && fh.CounterResetHint == histogram.GaugeType {
hc := c.(*chunkenc.FloatHistogramChunk)
hc.SetCounterResetHeader(chunkenc.GaugeType)
newChunk, _, ca, err = ca.AppendFloatHistogram(nil, s.Get(i).T(), s.Get(i).FH(), false)
if err != nil {
return emptyChunk, err
}
if newChunk != nil {
return emptyChunk, fmt.Errorf("did not expect to start a second chunk")
}
default:
panic(fmt.Sprintf("unknown sample type %s", sampleType.String()))
@ -92,7 +94,7 @@ func ChunkFromSamplesGeneric(s Samples) chunks.Meta {
MinTime: mint,
MaxTime: maxt,
Chunk: c,
}
}, nil
}
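
Editor's note: call sites now unwrap the error instead of relying on the old panic. A minimal caller sketch (variable names assumed):

	chk, err := tsdbutil.ChunkFromSamples(samples)
	if err != nil {
		return err // e.g. the samples could not be encoded into a single chunk
	}
	it := chk.Chunk.Iterator(nil)
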
type sample struct {
@ -130,7 +132,7 @@ func (s sample) Type() chunkenc.ValueType {
}
// PopulatedChunk creates a chunk populated with samples every second starting at minTime
func PopulatedChunk(numSamples int, minTime int64) chunks.Meta {
func PopulatedChunk(numSamples int, minTime int64) (chunks.Meta, error) {
samples := make([]Sample, numSamples)
for i := 0; i < numSamples; i++ {
samples[i] = sample{t: minTime + int64(i*1000), f: 1.0}

View file

@ -217,6 +217,7 @@ type API struct {
remoteWriteHandler http.Handler
remoteReadHandler http.Handler
otlpWriteHandler http.Handler
codecs []Codec
}
@ -249,6 +250,8 @@ func NewAPI(
gatherer prometheus.Gatherer,
registerer prometheus.Registerer,
statsRenderer StatsRenderer,
rwEnabled bool,
otlpEnabled bool,
) *API {
a := &API{
QueryEngine: qe,
@ -285,9 +288,16 @@ func NewAPI(
a.statsRenderer = statsRenderer
}
if ap != nil {
if ap == nil && (rwEnabled || otlpEnabled) {
panic("remote write or otlp write enabled, but no appender passed in.")
}
if rwEnabled {
a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap)
}
if otlpEnabled {
a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap)
}
return a
}
@ -380,6 +390,7 @@ func (api *API) Register(r *route.Router) {
r.Get("/status/walreplay", api.serveWALReplayStatus)
r.Post("/read", api.ready(api.remoteRead))
r.Post("/write", api.ready(api.remoteWrite))
r.Post("/otlp/v1/metrics", api.ready(api.otlpWrite))
r.Get("/alerts", wrapAgent(api.alerts))
r.Get("/rules", wrapAgent(api.rules))
@ -1581,6 +1592,14 @@ func (api *API) remoteWrite(w http.ResponseWriter, r *http.Request) {
}
}
func (api *API) otlpWrite(w http.ResponseWriter, r *http.Request) {
if api.otlpWriteHandler != nil {
api.otlpWriteHandler.ServeHTTP(w, r)
} else {
http.Error(w, "otlp write receiver needs to be enabled with --enable-feature=otlp-write-receiver", http.StatusNotFound)
}
}
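
Editor's note: a hypothetical smoke test against the new route (host, port, and exact content type are assumptions; OTLP/HTTP carries protobuf-encoded requests, and the route sits under the /api/v1 prefix registered above):

	resp, err := http.Post(
		"http://localhost:9090/api/v1/otlp/v1/metrics",
		"application/x-protobuf",
		bytes.NewReader(otlpRequestBody), // marshalled OTLP metrics export request
	)
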
func (api *API) deleteSeries(r *http.Request) apiFuncResult {
if !api.enableAdmin {
return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil}

View file

@ -134,6 +134,8 @@ func createPrometheusAPI(q storage.SampleAndChunkQueryable) *route.Router {
prometheus.DefaultGatherer,
nil,
nil,
false,
false,
)
promRouter := route.New().WithPrefix("/api/v1")

View file

@ -335,18 +335,41 @@ func TestFederationWithNativeHistograms(t *testing.T) {
},
NegativeBuckets: []int64{1, 1, -1, 0},
}
histWithoutZeroBucket := &histogram.Histogram{
Count: 20,
Sum: 99.23,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{2, 2, -2, 0},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
NegativeBuckets: []int64{2, 2, -2, 0},
}
app := db.Appender(context.Background())
for i := 0; i < 6; i++ {
l := labels.FromStrings("__name__", "test_metric", "foo", fmt.Sprintf("%d", i))
expL := labels.FromStrings("__name__", "test_metric", "instance", "", "foo", fmt.Sprintf("%d", i))
if i%3 == 0 {
switch i {
case 0, 3:
_, err = app.Append(0, l, 100*60*1000, float64(i*100))
expVec = append(expVec, promql.Sample{
T: 100 * 60 * 1000,
F: float64(i * 100),
Metric: expL,
})
} else {
case 4:
_, err = app.AppendHistogram(0, l, 100*60*1000, histWithoutZeroBucket.Copy(), nil)
expVec = append(expVec, promql.Sample{
T: 100 * 60 * 1000,
H: histWithoutZeroBucket.ToFloat(),
Metric: expL,
})
default:
hist.ZeroCount++
_, err = app.AppendHistogram(0, l, 100*60*1000, hist.Copy(), nil)
expVec = append(expVec, promql.Sample{

View file

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/codemirror-promql",
"version": "0.45.0",
"version": "0.46.0",
"description": "a CodeMirror mode for the PromQL language",
"types": "dist/esm/index.d.ts",
"module": "dist/esm/index.js",
@ -29,7 +29,7 @@
},
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": {
"@prometheus-io/lezer-promql": "0.45.0",
"@prometheus-io/lezer-promql": "0.46.0",
"lru-cache": "^6.0.0"
},
"devDependencies": {

View file

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/lezer-promql",
"version": "0.45.0",
"version": "0.46.0",
"description": "lezer-based PromQL grammar",
"main": "dist/index.cjs",
"type": "module",

View file

@ -28,10 +28,10 @@
},
"module/codemirror-promql": {
"name": "@prometheus-io/codemirror-promql",
"version": "0.45.0",
"version": "0.46.0",
"license": "Apache-2.0",
"dependencies": {
"@prometheus-io/lezer-promql": "0.45.0",
"@prometheus-io/lezer-promql": "0.46.0",
"lru-cache": "^6.0.0"
},
"devDependencies": {
@ -61,7 +61,7 @@
},
"module/lezer-promql": {
"name": "@prometheus-io/lezer-promql",
"version": "0.45.0",
"version": "0.46.0",
"license": "Apache-2.0",
"devDependencies": {
"@lezer/generator": "^1.2.3",
@ -20765,7 +20765,7 @@
},
"react-app": {
"name": "@prometheus-io/app",
"version": "0.45.0",
"version": "0.46.0",
"dependencies": {
"@codemirror/autocomplete": "^6.7.1",
"@codemirror/commands": "^6.2.4",
@ -20783,7 +20783,7 @@
"@lezer/lr": "^1.3.6",
"@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.45.0",
"@prometheus-io/codemirror-promql": "0.46.0",
"bootstrap": "^4.6.2",
"css.escape": "^1.5.1",
"downshift": "^7.6.0",
@ -23423,7 +23423,7 @@
"@lezer/lr": "^1.3.6",
"@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.45.0",
"@prometheus-io/codemirror-promql": "0.46.0",
"@testing-library/react-hooks": "^7.0.2",
"@types/enzyme": "^3.10.13",
"@types/flot": "0.0.32",
@ -23487,7 +23487,7 @@
"@lezer/common": "^1.0.3",
"@lezer/highlight": "^1.1.6",
"@lezer/lr": "^1.3.6",
"@prometheus-io/lezer-promql": "0.45.0",
"@prometheus-io/lezer-promql": "0.46.0",
"@types/lru-cache": "^5.1.1",
"isomorphic-fetch": "^3.0.0",
"lru-cache": "^6.0.0",

View file

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/app",
"version": "0.45.0",
"version": "0.46.0",
"private": true,
"dependencies": {
"@codemirror/autocomplete": "^6.7.1",
@ -19,7 +19,7 @@
"@lezer/common": "^1.0.3",
"@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.45.0",
"@prometheus-io/codemirror-promql": "0.46.0",
"bootstrap": "^4.6.2",
"css.escape": "^1.5.1",
"downshift": "^7.6.0",

View file

@ -259,6 +259,7 @@ type Options struct {
RemoteReadConcurrencyLimit int
RemoteReadBytesInFrame int
EnableRemoteWriteReceiver bool
EnableOTLPWriteReceiver bool
IsAgent bool
AppName string
@ -317,7 +318,7 @@ func New(logger log.Logger, o *Options) *Handler {
FactoryRr := func(_ context.Context) api_v1.RulesRetriever { return h.ruleManager }
var app storage.Appendable
if o.EnableRemoteWriteReceiver {
if o.EnableRemoteWriteReceiver || o.EnableOTLPWriteReceiver {
app = h.storage
}
@ -349,6 +350,8 @@ func New(logger log.Logger, o *Options) *Handler {
o.Gatherer,
o.Registerer,
nil,
o.EnableRemoteWriteReceiver,
o.EnableOTLPWriteReceiver,
)
if o.RoutePrefix != "/" {