2015-08-24 10:19:21 -07:00
|
|
|
// Copyright 2015 The Prometheus Authors
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2015-06-22 13:46:55 -07:00
|
|
|
package web
|
|
|
|
|
|
|
|
import (
|
2023-11-07 19:49:39 -08:00
|
|
|
"errors"
|
2018-08-17 08:24:35 -07:00
|
|
|
"fmt"
|
2015-06-22 13:46:55 -07:00
|
|
|
"net/http"
|
2024-01-15 08:24:46 -08:00
|
|
|
"slices"
|
2016-12-30 10:34:45 -08:00
|
|
|
"sort"
|
2023-09-21 13:53:51 -07:00
|
|
|
"strings"
|
2015-06-22 13:46:55 -07:00
|
|
|
|
2016-12-30 10:34:45 -08:00
|
|
|
"github.com/gogo/protobuf/proto"
|
2016-12-06 07:09:50 -08:00
|
|
|
"github.com/prometheus/client_golang/prometheus"
|
2016-12-30 10:34:45 -08:00
|
|
|
dto "github.com/prometheus/client_model/go"
|
2015-08-21 04:16:50 -07:00
|
|
|
"github.com/prometheus/common/expfmt"
|
2015-08-20 08:18:46 -07:00
|
|
|
"github.com/prometheus/common/model"
|
2015-09-01 09:47:48 -07:00
|
|
|
|
2023-01-09 03:36:15 -08:00
|
|
|
"github.com/prometheus/prometheus/model/histogram"
|
2021-11-08 06:23:17 -08:00
|
|
|
"github.com/prometheus/prometheus/model/labels"
|
|
|
|
"github.com/prometheus/prometheus/model/timestamp"
|
|
|
|
"github.com/prometheus/prometheus/model/value"
|
2016-07-11 11:27:25 -07:00
|
|
|
"github.com/prometheus/prometheus/promql"
|
2020-02-03 10:06:39 -08:00
|
|
|
"github.com/prometheus/prometheus/promql/parser"
|
2016-12-30 10:34:45 -08:00
|
|
|
"github.com/prometheus/prometheus/storage"
|
2020-10-22 02:00:08 -07:00
|
|
|
"github.com/prometheus/prometheus/tsdb"
|
2021-11-28 23:54:23 -08:00
|
|
|
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
2015-06-22 13:46:55 -07:00
|
|
|
)
|
|
|
|
|
2016-12-06 07:09:50 -08:00
|
|
|
var (
	// federationErrors counts federation requests that could not be served,
	// e.g. because the storage querier could not be created, the series set
	// returned an error, or encoding the response failed.
	federationErrors = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_web_federation_errors_total",
		Help: "Total number of errors that occurred while sending federation responses.",
	})
	// federationWarnings counts warnings reported by the storage select
	// while building federation responses.
	federationWarnings = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_web_federation_warnings_total",
		Help: "Total number of warnings that occurred while sending federation responses.",
	})
)
|
|
|
|
|
2020-04-06 01:05:01 -07:00
|
|
|
func registerFederationMetrics(r prometheus.Registerer) {
|
|
|
|
r.MustRegister(federationWarnings, federationErrors)
|
|
|
|
}
|
|
|
|
|
2015-09-01 09:47:48 -07:00
|
|
|
// federation implements the /federate endpoint. For every series matching
// one of the match[] selectors it exposes the most recent sample within the
// handler's lookback delta, encoded in the exposition format negotiated from
// the request headers.
func (h *Handler) federation(w http.ResponseWriter, req *http.Request) {
	// h.mtx guards mutable handler state (e.g. h.config, read below for the
	// external labels) against concurrent reloads.
	h.mtx.RLock()
	defer h.mtx.RUnlock()

	ctx := req.Context()

	if err := req.ParseForm(); err != nil {
		http.Error(w, fmt.Sprintf("error parsing form values: %v", err), http.StatusBadRequest)
		return
	}

	matcherSets, err := parser.ParseMetricSelectors(req.Form["match[]"])
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	var (
		// Query window: [now - lookbackDelta, now].
		mint   = timestamp.FromTime(h.now().Time().Add(-h.lookbackDelta))
		maxt   = timestamp.FromTime(h.now().Time())
		format = expfmt.Negotiate(req.Header)
		enc    = expfmt.NewEncoder(w, format)
	)
	w.Header().Set("Content-Type", string(format))

	q, err := h.localStorage.Querier(mint, maxt)
	if err != nil {
		federationErrors.Inc()
		// Distinguish "TSDB not ready yet" (retryable, 503) from other
		// storage errors (500).
		if errors.Is(err, tsdb.ErrNotReady) {
			http.Error(w, err.Error(), http.StatusServiceUnavailable)
			return
		}
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer q.Close()

	// Pre-sized result vector; 8000 is a heuristic capacity to limit growth
	// reallocations for typical federation sizes.
	vec := make(promql.Vector, 0, 8000)

	hints := &storage.SelectHints{Start: mint, End: maxt}

	// One Select per matcher set; results are merged below.
	var sets []storage.SeriesSet
	for _, mset := range matcherSets {
		s := q.Select(ctx, true, hints, mset...)
		sets = append(sets, s)
	}

	set := storage.NewMergeSeriesSet(sets, 0, storage.ChainedSeriesMerge)
	// Buffered iterator keeping lookbackDelta worth of samples; the delta is
	// converted from nanoseconds (time.Duration) to milliseconds (sample
	// timestamp resolution).
	it := storage.NewBuffer(int64(h.lookbackDelta / 1e6))
	var chkIter chunkenc.Iterator
Loop:
	for set.Next() {
		s := set.At()

		// TODO(fabxc): allow fast path for most recent sample either
		// in the storage itself or caching layer in Prometheus.
		chkIter = s.Iterator(chkIter)
		it.Reset(chkIter)

		var (
			t  int64
			f  float64
			fh *histogram.FloatHistogram
		)
		// Seek to maxt. If there is a sample at or after maxt, use it
		// directly; otherwise fall back to the most recent buffered sample
		// within the lookback window (PeekBack below).
		valueType := it.Seek(maxt)
		switch valueType {
		case chunkenc.ValFloat:
			t, f = it.At()
		case chunkenc.ValFloatHistogram, chunkenc.ValHistogram:
			// Integer histograms are converted to float histograms here, so
			// both kinds are handled uniformly downstream.
			t, fh = it.AtFloatHistogram(nil)
		default:
			sample, ok := it.PeekBack(1)
			if !ok {
				// No sample within the lookback window; skip this series.
				continue Loop
			}
			t = sample.T()
			switch sample.Type() {
			case chunkenc.ValFloat:
				f = sample.F()
			case chunkenc.ValHistogram:
				fh = sample.H().ToFloat(nil)
			case chunkenc.ValFloatHistogram:
				fh = sample.FH()
			default:
				continue Loop
			}
		}
		// The exposition formats do not support stale markers, so drop them. This
		// is good enough for staleness handling of federated data, as the
		// interval-based limits on staleness will do the right thing for supported
		// use cases (which is to say federating aggregated time series).
		if value.IsStaleNaN(f) || (fh != nil && value.IsStaleNaN(fh.Sum)) {
			continue
		}

		vec = append(vec, promql.Sample{
			Metric: s.Labels(),
			T:      t,
			F:      f,
			H:      fh,
		})
	}
	if ws := set.Warnings(); len(ws) > 0 {
		h.logger.Debug("Federation select returned warnings", "warnings", ws)
		federationWarnings.Add(float64(len(ws)))
	}
	if set.Err() != nil {
		federationErrors.Inc()
		http.Error(w, set.Err().Error(), http.StatusInternalServerError)
		return
	}

	// Sort by metric name so that all samples of one metric are contiguous
	// and can be emitted as a single MetricFamily (modulo the type-based
	// splits handled below).
	slices.SortFunc(vec, func(a, b promql.Sample) int {
		ni := a.Metric.Get(labels.MetricName)
		nj := b.Metric.Get(labels.MetricName)
		return strings.Compare(ni, nj)
	})

	externalLabels := h.config.GlobalConfig.ExternalLabels.Map()
	// Always reserve the instance label so that a series-level instance
	// label is overridden with "" if the external labels don't set one.
	if _, ok := externalLabels[model.InstanceLabel]; !ok {
		externalLabels[model.InstanceLabel] = ""
	}
	// Deterministic label order for the output: sort external label names.
	externalLabelNames := make([]string, 0, len(externalLabels))
	for ln := range externalLabels {
		externalLabelNames = append(externalLabelNames, ln)
	}
	sort.Strings(externalLabelNames)

	var (
		// State carried across iterations to decide when a new MetricFamily
		// must be started (name change or sample-type change).
		lastMetricName                          string
		lastWasHistogram, lastHistogramWasGauge bool
		protMetricFam                           *dto.MetricFamily
	)
	for _, s := range vec {
		isHistogram := s.H != nil
		formatType := format.FormatType()
		if isHistogram &&
			formatType != expfmt.TypeProtoDelim && formatType != expfmt.TypeProtoText && formatType != expfmt.TypeProtoCompact {
			// Can't serve the native histogram.
			// TODO(codesome): Serve them when other protocols get the native histogram support.
			continue
		}

		nameSeen := false
		// Tracks which external labels are already present on the series so
		// they are not attached a second time below.
		globalUsed := map[string]struct{}{}
		protMetric := &dto.Metric{}

		err := s.Metric.Validate(func(l labels.Label) error {
			if l.Value == "" {
				// No value means unset. Never consider those labels.
				// This is also important to protect against nameless metrics.
				return nil
			}
			if l.Name == labels.MetricName {
				nameSeen = true
				if l.Value == lastMetricName && // We already have the name in the current MetricFamily, and we ignore nameless metrics.
					lastWasHistogram == isHistogram && // The sample type matches (float vs histogram).
					// If it was a histogram, the histogram type (counter vs gauge) also matches.
					(!isHistogram || lastHistogramWasGauge == (s.H.CounterResetHint == histogram.GaugeType)) {
					return nil
				}

				// Since we now check for the sample type and type of histogram above, we will end up
				// creating multiple metric families for the same metric name. This would technically be
				// an invalid exposition. But since the consumer of this is Prometheus, and Prometheus can
				// parse it fine, we allow it and bend the rules to make federation possible in those cases.

				// Need to start a new MetricFamily. Ship off the old one (if any) before
				// creating the new one.
				if protMetricFam != nil {
					if err := enc.Encode(protMetricFam); err != nil {
						return err
					}
				}
				protMetricFam = &dto.MetricFamily{
					Type: dto.MetricType_UNTYPED.Enum(),
					Name: proto.String(l.Value),
				}
				if isHistogram {
					if s.H.CounterResetHint == histogram.GaugeType {
						protMetricFam.Type = dto.MetricType_GAUGE_HISTOGRAM.Enum()
					} else {
						protMetricFam.Type = dto.MetricType_HISTOGRAM.Enum()
					}
				}
				lastMetricName = l.Value
				return nil
			}
			protMetric.Label = append(protMetric.Label, &dto.LabelPair{
				Name:  proto.String(l.Name),
				Value: proto.String(l.Value),
			})
			if _, ok := externalLabels[l.Name]; ok {
				globalUsed[l.Name] = struct{}{}
			}
			return nil
		})
		if err != nil {
			// Encoding the previous MetricFamily failed inside the closure;
			// the response is already partially written, so just log and bail.
			federationErrors.Inc()
			h.logger.Error("federation failed", "err", err)
			return
		}
		if !nameSeen {
			h.logger.Warn("Ignoring nameless metric during federation", "metric", s.Metric)
			continue
		}
		// Attach global labels if they do not exist yet.
		for _, ln := range externalLabelNames {
			lv := externalLabels[ln]
			if _, ok := globalUsed[ln]; !ok {
				protMetric.Label = append(protMetric.Label, &dto.LabelPair{
					Name:  proto.String(ln),
					Value: proto.String(lv),
				})
			}
		}

		protMetric.TimestampMs = proto.Int64(s.T)
		if !isHistogram {
			lastHistogramWasGauge = false
			protMetric.Untyped = &dto.Untyped{
				Value: proto.Float64(s.F),
			}
		} else {
			lastHistogramWasGauge = s.H.CounterResetHint == histogram.GaugeType
			protMetric.Histogram = &dto.Histogram{
				SampleCountFloat: proto.Float64(s.H.Count),
				SampleSum:        proto.Float64(s.H.Sum),
				Schema:           proto.Int32(s.H.Schema),
				ZeroThreshold:    proto.Float64(s.H.ZeroThreshold),
				ZeroCountFloat:   proto.Float64(s.H.ZeroCount),
				NegativeCount:    s.H.NegativeBuckets,
				PositiveCount:    s.H.PositiveBuckets,
			}
			if len(s.H.PositiveSpans) > 0 {
				protMetric.Histogram.PositiveSpan = make([]*dto.BucketSpan, len(s.H.PositiveSpans))
				for i, sp := range s.H.PositiveSpans {
					protMetric.Histogram.PositiveSpan[i] = &dto.BucketSpan{
						Offset: proto.Int32(sp.Offset),
						Length: proto.Uint32(sp.Length),
					}
				}
			}
			if len(s.H.NegativeSpans) > 0 {
				protMetric.Histogram.NegativeSpan = make([]*dto.BucketSpan, len(s.H.NegativeSpans))
				for i, sp := range s.H.NegativeSpans {
					protMetric.Histogram.NegativeSpan[i] = &dto.BucketSpan{
						Offset: proto.Int32(sp.Offset),
						Length: proto.Uint32(sp.Length),
					}
				}
			}
		}
		lastWasHistogram = isHistogram
		protMetricFam.Metric = append(protMetricFam.Metric, protMetric)
	}
	// Still have to ship off the last MetricFamily, if any.
	if protMetricFam != nil {
		if err := enc.Encode(protMetricFam); err != nil {
			federationErrors.Inc()
			h.logger.Error("federation failed", "err", err)
		}
	}
}
|