2016-09-08 08:39:52 -07:00
|
|
|
// Copyright 2016 The Prometheus Authors
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
package web
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
2020-04-29 09:16:14 -07:00
|
|
|
"context"
|
2023-01-09 05:05:04 -08:00
|
|
|
"fmt"
|
|
|
|
"io"
|
2020-04-29 09:16:14 -07:00
|
|
|
"net/http"
|
2016-09-08 08:39:52 -07:00
|
|
|
"net/http/httptest"
|
|
|
|
"sort"
|
|
|
|
"strings"
|
|
|
|
"testing"
|
2020-02-09 15:58:23 -08:00
|
|
|
"time"
|
2016-09-08 08:39:52 -07:00
|
|
|
|
2020-04-29 09:16:14 -07:00
|
|
|
"github.com/pkg/errors"
|
2016-09-08 08:39:52 -07:00
|
|
|
"github.com/prometheus/common/model"
|
2020-10-29 02:43:23 -07:00
|
|
|
"github.com/stretchr/testify/require"
|
2020-10-22 02:00:08 -07:00
|
|
|
|
2017-05-11 08:09:24 -07:00
|
|
|
"github.com/prometheus/prometheus/config"
|
2023-01-09 05:05:04 -08:00
|
|
|
"github.com/prometheus/prometheus/model/histogram"
|
2021-11-08 06:23:17 -08:00
|
|
|
"github.com/prometheus/prometheus/model/labels"
|
2023-01-09 05:05:04 -08:00
|
|
|
"github.com/prometheus/prometheus/model/textparse"
|
2016-09-08 08:39:52 -07:00
|
|
|
"github.com/prometheus/prometheus/promql"
|
2020-04-29 09:16:14 -07:00
|
|
|
"github.com/prometheus/prometheus/storage"
|
|
|
|
"github.com/prometheus/prometheus/tsdb"
|
2016-09-08 08:39:52 -07:00
|
|
|
)
|
|
|
|
|
|
|
|
// scenarios maps a test-case name to a federation request and its expected
// outcome: the raw query-string parameters appended to /federate?, the
// external labels to configure on the handler for that case, and the expected
// HTTP status code and (normalized, see normalizeBody) response body.
var scenarios = map[string]struct {
	params         string        // raw query string, e.g. "match[]=test_metric1"
	externalLabels labels.Labels // external labels set in the handler's global config
	code           int           // expected HTTP status code
	body           string        // expected normalized response body
}{
	"empty": {
		params: "",
		code:   200,
		body:   ``,
	},
	"match nothing": {
		params: "match[]=does_not_match_anything",
		code:   200,
		body:   ``,
	},
	"invalid params from the beginning": {
		params: "match[]=-not-a-valid-metric-name",
		code:   400,
		body: `1:1: parse error: unexpected <op:->
`,
	},
	"invalid params somewhere in the middle": {
		params: "match[]=not-a-valid-metric-name",
		code:   400,
		body: `1:4: parse error: unexpected <op:->
`,
	},
	"test_metric1": {
		params: "match[]=test_metric1",
		code:   200,
		body: `# TYPE test_metric1 untyped
test_metric1{foo="bar",instance="i"} 10000 6000000
test_metric1{foo="boo",instance="i"} 1 6000000
`,
	},
	"test_metric2": {
		params: "match[]=test_metric2",
		code:   200,
		body: `# TYPE test_metric2 untyped
test_metric2{foo="boo",instance="i"} 1 6000000
`,
	},
	"test_metric_without_labels": {
		params: "match[]=test_metric_without_labels",
		code:   200,
		body: `# TYPE test_metric_without_labels untyped
test_metric_without_labels{instance=""} 1001 6000000
`,
	},
	// A series that went stale before "now" must not be federated.
	"test_stale_metric": {
		params: "match[]=test_metric_stale",
		code:   200,
		body:   ``,
	},
	// A series whose last sample is old (but within the lookback delta) is
	// federated with its last timestamp.
	"test_old_metric": {
		params: "match[]=test_metric_old",
		code:   200,
		body: `# TYPE test_metric_old untyped
test_metric_old{instance=""} 981 5880000
`,
	},
	"{foo='boo'}": {
		params: "match[]={foo='boo'}",
		code:   200,
		body: `# TYPE test_metric1 untyped
test_metric1{foo="boo",instance="i"} 1 6000000
# TYPE test_metric2 untyped
test_metric2{foo="boo",instance="i"} 1 6000000
`,
	},
	"two matchers": {
		params: "match[]=test_metric1&match[]=test_metric2",
		code:   200,
		body: `# TYPE test_metric1 untyped
test_metric1{foo="bar",instance="i"} 10000 6000000
test_metric1{foo="boo",instance="i"} 1 6000000
# TYPE test_metric2 untyped
test_metric2{foo="boo",instance="i"} 1 6000000
`,
	},
	// Overlapping matchers must not duplicate series in the output.
	"two matchers with overlap": {
		params: "match[]={__name__=~'test_metric1'}&match[]={foo='bar'}",
		code:   200,
		body: `# TYPE test_metric1 untyped
test_metric1{foo="bar",instance="i"} 10000 6000000
test_metric1{foo="boo",instance="i"} 1 6000000
`,
	},
	"everything": {
		params: "match[]={__name__=~'.%2b'}", // '%2b' is an URL-encoded '+'.
		code:   200,
		body: `# TYPE test_metric1 untyped
test_metric1{foo="bar",instance="i"} 10000 6000000
test_metric1{foo="boo",instance="i"} 1 6000000
# TYPE test_metric2 untyped
test_metric2{foo="boo",instance="i"} 1 6000000
# TYPE test_metric_old untyped
test_metric_old{instance=""} 981 5880000
# TYPE test_metric_without_labels untyped
test_metric_without_labels{instance=""} 1001 6000000
`,
	},
	"empty label value matches everything that doesn't have that label": {
		params: "match[]={foo='',__name__=~'.%2b'}",
		code:   200,
		body: `# TYPE test_metric_old untyped
test_metric_old{instance=""} 981 5880000
# TYPE test_metric_without_labels untyped
test_metric_without_labels{instance=""} 1001 6000000
`,
	},
	"empty label value for a label that doesn't exist at all, matches everything": {
		params: "match[]={bar='',__name__=~'.%2b'}",
		code:   200,
		body: `# TYPE test_metric1 untyped
test_metric1{foo="bar",instance="i"} 10000 6000000
test_metric1{foo="boo",instance="i"} 1 6000000
# TYPE test_metric2 untyped
test_metric2{foo="boo",instance="i"} 1 6000000
# TYPE test_metric_old untyped
test_metric_old{instance=""} 981 5880000
# TYPE test_metric_without_labels untyped
test_metric_without_labels{instance=""} 1001 6000000
`,
	},
	// External labels must only be attached where the series does not
	// already carry a label of the same name (here: foo).
	"external labels are added if not already present": {
		params:         "match[]={__name__=~'.%2b'}", // '%2b' is an URL-encoded '+'.
		externalLabels: labels.FromStrings("foo", "baz", "zone", "ie"),
		code:           200,
		body: `# TYPE test_metric1 untyped
test_metric1{foo="bar",instance="i",zone="ie"} 10000 6000000
test_metric1{foo="boo",instance="i",zone="ie"} 1 6000000
# TYPE test_metric2 untyped
test_metric2{foo="boo",instance="i",zone="ie"} 1 6000000
# TYPE test_metric_old untyped
test_metric_old{foo="baz",instance="",zone="ie"} 981 5880000
# TYPE test_metric_without_labels untyped
test_metric_without_labels{foo="baz",instance="",zone="ie"} 1001 6000000
`,
	},
	"instance is an external label": {
		// This makes no sense as a configuration, but we should
		// know what it does anyway.
		params:         "match[]={__name__=~'.%2b'}", // '%2b' is an URL-encoded '+'.
		externalLabels: labels.FromStrings("instance", "baz"),
		code:           200,
		body: `# TYPE test_metric1 untyped
test_metric1{foo="bar",instance="i"} 10000 6000000
test_metric1{foo="boo",instance="i"} 1 6000000
# TYPE test_metric2 untyped
test_metric2{foo="boo",instance="i"} 1 6000000
# TYPE test_metric_old untyped
test_metric_old{instance="baz"} 981 5880000
# TYPE test_metric_without_labels untyped
test_metric_without_labels{instance="baz"} 1001 6000000
`,
	},
}
|
|
|
|
|
|
|
|
// TestFederation loads a fixed set of test series into an in-memory test
// storage, then runs every entry of the scenarios table against the
// federation handler, asserting on status code and normalized body.
// "now" is pinned to 101 minutes after the epoch so the expected sample
// timestamps in the scenarios table are stable.
func TestFederation(t *testing.T) {
	suite, err := promql.NewTest(t, `
		load 1m
			test_metric1{foo="bar",instance="i"}    0+100x100
			test_metric1{foo="boo",instance="i"}    1+0x100
			test_metric2{foo="boo",instance="i"}    1+0x100
			test_metric_without_labels 1+10x100
			test_metric_stale                       1+10x99 stale
			test_metric_old                         1+10x98
	`)
	if err != nil {
		t.Fatal(err)
	}
	defer suite.Close()

	if err := suite.Run(); err != nil {
		t.Fatal(err)
	}

	h := &Handler{
		localStorage:  &dbAdapter{suite.TSDB()},
		lookbackDelta: 5 * time.Minute,
		now:           func() model.Time { return 101 * 60 * 1000 }, // 101min after epoch.
		config: &config.Config{
			GlobalConfig: config.GlobalConfig{},
		},
	}

	for name, scenario := range scenarios {
		t.Run(name, func(t *testing.T) {
			// The handler is shared across subtests; only the external
			// labels vary per scenario.
			h.config.GlobalConfig.ExternalLabels = scenario.externalLabels
			req := httptest.NewRequest("GET", "http://example.org/federate?"+scenario.params, nil)
			res := httptest.NewRecorder()

			h.federation(res, req)
			require.Equal(t, scenario.code, res.Code)
			// Normalize because federation does not sort within a family.
			require.Equal(t, scenario.body, normalizeBody(res.Body))
		})
	}
}
|
|
|
|
|
|
|
|
// notReadyReadStorage embeds a LocalStorage but overrides the read-path
// methods below to always fail with a wrapped tsdb.ErrNotReady, simulating
// a storage that has not finished starting up.
type notReadyReadStorage struct {
	LocalStorage
}
|
|
|
|
|
|
|
|
// Querier always fails with a wrapped tsdb.ErrNotReady. The wrapping
// exercises the handler's error unwrapping when detecting readiness.
func (notReadyReadStorage) Querier(context.Context, int64, int64) (storage.Querier, error) {
	return nil, errors.Wrap(tsdb.ErrNotReady, "wrap")
}
|
|
|
|
|
|
|
|
// StartTime always fails with a wrapped tsdb.ErrNotReady.
func (notReadyReadStorage) StartTime() (int64, error) {
	return 0, errors.Wrap(tsdb.ErrNotReady, "wrap")
}
|
|
|
|
|
|
|
|
// Stats always fails with a wrapped tsdb.ErrNotReady.
func (notReadyReadStorage) Stats(string) (*tsdb.Stats, error) {
	return nil, errors.Wrap(tsdb.ErrNotReady, "wrap")
}
|
|
|
|
|
|
|
|
// Regression test for https://github.com/prometheus/prometheus/issues/7181.
|
|
|
|
func TestFederation_NotReady(t *testing.T) {
|
|
|
|
for name, scenario := range scenarios {
|
|
|
|
t.Run(name, func(t *testing.T) {
|
2021-10-25 00:18:26 -07:00
|
|
|
h := &Handler{
|
|
|
|
localStorage: notReadyReadStorage{},
|
|
|
|
lookbackDelta: 5 * time.Minute,
|
|
|
|
now: func() model.Time { return 101 * 60 * 1000 }, // 101min after epoch.
|
|
|
|
config: &config.Config{
|
|
|
|
GlobalConfig: config.GlobalConfig{
|
|
|
|
ExternalLabels: scenario.externalLabels,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
2020-04-29 09:16:14 -07:00
|
|
|
req := httptest.NewRequest("GET", "http://example.org/federate?"+scenario.params, nil)
|
|
|
|
res := httptest.NewRecorder()
|
|
|
|
|
|
|
|
h.federation(res, req)
|
|
|
|
if scenario.code == http.StatusBadRequest {
|
|
|
|
// Request are expected to be checked before DB readiness.
|
2020-10-29 02:43:23 -07:00
|
|
|
require.Equal(t, http.StatusBadRequest, res.Code)
|
2020-04-29 09:16:14 -07:00
|
|
|
return
|
|
|
|
}
|
2020-10-29 02:43:23 -07:00
|
|
|
require.Equal(t, http.StatusServiceUnavailable, res.Code)
|
2020-04-29 09:16:14 -07:00
|
|
|
})
|
2016-09-08 08:39:52 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// normalizeBody sorts the lines within a metric to make it easy to verify the body.
|
|
|
|
// (Federation is not taking care of sorting within a metric family.)
|
|
|
|
func normalizeBody(body *bytes.Buffer) string {
|
|
|
|
var (
|
|
|
|
lines []string
|
|
|
|
lastHash int
|
|
|
|
)
|
|
|
|
for line, err := body.ReadString('\n'); err == nil; line, err = body.ReadString('\n') {
|
|
|
|
if line[0] == '#' && len(lines) > 0 {
|
|
|
|
sort.Strings(lines[lastHash+1:])
|
|
|
|
lastHash = len(lines)
|
|
|
|
}
|
|
|
|
lines = append(lines, line)
|
|
|
|
}
|
|
|
|
if len(lines) > 0 {
|
|
|
|
sort.Strings(lines[lastHash+1:])
|
|
|
|
}
|
|
|
|
return strings.Join(lines, "")
|
|
|
|
}
|
2023-01-09 05:05:04 -08:00
|
|
|
|
|
|
|
func TestFederationWithNativeHistograms(t *testing.T) {
|
|
|
|
suite, err := promql.NewTest(t, "")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
defer suite.Close()
|
|
|
|
|
|
|
|
if err := suite.Run(); err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
var expVec promql.Vector
|
|
|
|
|
|
|
|
db := suite.TSDB()
|
|
|
|
hist := &histogram.Histogram{
|
|
|
|
Count: 10,
|
|
|
|
ZeroCount: 2,
|
|
|
|
ZeroThreshold: 0.001,
|
|
|
|
Sum: 39.4,
|
|
|
|
Schema: 1,
|
|
|
|
PositiveSpans: []histogram.Span{
|
|
|
|
{Offset: 0, Length: 2},
|
|
|
|
{Offset: 1, Length: 2},
|
|
|
|
},
|
|
|
|
PositiveBuckets: []int64{1, 1, -1, 0},
|
|
|
|
NegativeSpans: []histogram.Span{
|
|
|
|
{Offset: 0, Length: 2},
|
|
|
|
{Offset: 1, Length: 2},
|
|
|
|
},
|
|
|
|
NegativeBuckets: []int64{1, 1, -1, 0},
|
|
|
|
}
|
|
|
|
app := db.Appender(context.Background())
|
|
|
|
for i := 0; i < 6; i++ {
|
|
|
|
l := labels.FromStrings("__name__", "test_metric", "foo", fmt.Sprintf("%d", i))
|
|
|
|
expL := labels.FromStrings("__name__", "test_metric", "instance", "", "foo", fmt.Sprintf("%d", i))
|
|
|
|
if i%3 == 0 {
|
2023-01-12 06:20:50 -08:00
|
|
|
_, err = app.Append(0, l, 100*60*1000, float64(i*100))
|
2023-01-09 05:05:04 -08:00
|
|
|
expVec = append(expVec, promql.Sample{
|
promql: Separate `Point` into `FPoint` and `HPoint`
In other words: Instead of having a “polymorphous” `Point` that can
either contain a float value or a histogram value, use an `FPoint` for
floats and an `HPoint` for histograms.
This seemingly small change has a _lot_ of repercussions throughout
the codebase.
The idea here is to avoid the increase in size of `Point` arrays that
happened after native histograms had been added.
The higher-level data structures (`Sample`, `Series`, etc.) are still
“polymorphous”. The same idea could be applied to them, but at each
step the trade-offs needed to be evaluated.
The idea with this change is to do the minimum necessary to get back
to pre-histogram performance for functions that do not touch
histograms. Here are comparisons for the `changes` function. The test
data doesn't include histograms yet. Ideally, there would be no change
in the benchmark result at all.
First runtime v2.39 compared to directly prior to this commit:
```
name old time/op new time/op delta
RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 542µs ± 1% +38.58% (p=0.000 n=9+8)
RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 617µs ± 2% +36.48% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.36ms ± 2% +21.58% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 8.94ms ± 1% +14.21% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.30ms ± 1% +10.67% (p=0.000 n=9+10)
RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.10ms ± 1% +11.82% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 11.8ms ± 1% +12.50% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 87.4ms ± 1% +12.63% (p=0.000 n=9+9)
RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 32.8ms ± 1% +8.01% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.6ms ± 2% +9.64% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 117ms ± 1% +11.69% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 876ms ± 1% +11.83% (p=0.000 n=9+10)
```
And then runtime v2.39 compared to after this commit:
```
name old time/op new time/op delta
RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 547µs ± 1% +39.84% (p=0.000 n=9+8)
RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 616µs ± 2% +36.15% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.26ms ± 1% +12.20% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 7.95ms ± 1% +1.59% (p=0.000 n=10+8)
RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.38ms ± 2% +13.49% (p=0.000 n=9+10)
RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.02ms ± 1% +9.80% (p=0.000 n=10+9)
RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 10.8ms ± 1% +3.08% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 78.1ms ± 1% +0.58% (p=0.035 n=9+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 33.5ms ± 4% +10.18% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.0ms ± 1% +7.98% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 107ms ± 1% +1.92% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 775ms ± 1% -1.02% (p=0.019 n=9+9)
```
In summary, the runtime doesn't really improve with this change for
queries with just a few steps. For queries with many steps, this
commit essentially reinstates the old performance. This is good
because the many-step queries are the one that matter most (longest
absolute runtime).
In terms of allocations, though, this commit doesn't make a dent at
all (numbers not shown). The reason is that most of the allocations
happen in the sampleRingIterator (in the storage package), which has
to be addressed in a separate commit.
Signed-off-by: beorn7 <beorn@grafana.com>
2022-10-28 07:58:40 -07:00
|
|
|
T: 100 * 60 * 1000,
|
|
|
|
F: float64(i * 100),
|
2023-01-09 05:05:04 -08:00
|
|
|
Metric: expL,
|
|
|
|
})
|
|
|
|
} else {
|
|
|
|
hist.ZeroCount++
|
2023-01-12 06:20:50 -08:00
|
|
|
_, err = app.AppendHistogram(0, l, 100*60*1000, hist.Copy(), nil)
|
2023-01-09 05:05:04 -08:00
|
|
|
expVec = append(expVec, promql.Sample{
|
promql: Separate `Point` into `FPoint` and `HPoint`
In other words: Instead of having a “polymorphous” `Point` that can
either contain a float value or a histogram value, use an `FPoint` for
floats and an `HPoint` for histograms.
This seemingly small change has a _lot_ of repercussions throughout
the codebase.
The idea here is to avoid the increase in size of `Point` arrays that
happened after native histograms had been added.
The higher-level data structures (`Sample`, `Series`, etc.) are still
“polymorphous”. The same idea could be applied to them, but at each
step the trade-offs needed to be evaluated.
The idea with this change is to do the minimum necessary to get back
to pre-histogram performance for functions that do not touch
histograms. Here are comparisons for the `changes` function. The test
data doesn't include histograms yet. Ideally, there would be no change
in the benchmark result at all.
First runtime v2.39 compared to directly prior to this commit:
```
name old time/op new time/op delta
RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 542µs ± 1% +38.58% (p=0.000 n=9+8)
RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 617µs ± 2% +36.48% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.36ms ± 2% +21.58% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 8.94ms ± 1% +14.21% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.30ms ± 1% +10.67% (p=0.000 n=9+10)
RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.10ms ± 1% +11.82% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 11.8ms ± 1% +12.50% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 87.4ms ± 1% +12.63% (p=0.000 n=9+9)
RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 32.8ms ± 1% +8.01% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.6ms ± 2% +9.64% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 117ms ± 1% +11.69% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 876ms ± 1% +11.83% (p=0.000 n=9+10)
```
And then runtime v2.39 compared to after this commit:
```
name old time/op new time/op delta
RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 547µs ± 1% +39.84% (p=0.000 n=9+8)
RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 616µs ± 2% +36.15% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.26ms ± 1% +12.20% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 7.95ms ± 1% +1.59% (p=0.000 n=10+8)
RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.38ms ± 2% +13.49% (p=0.000 n=9+10)
RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.02ms ± 1% +9.80% (p=0.000 n=10+9)
RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 10.8ms ± 1% +3.08% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 78.1ms ± 1% +0.58% (p=0.035 n=9+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 33.5ms ± 4% +10.18% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.0ms ± 1% +7.98% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 107ms ± 1% +1.92% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 775ms ± 1% -1.02% (p=0.019 n=9+9)
```
In summary, the runtime doesn't really improve with this change for
queries with just a few steps. For queries with many steps, this
commit essentially reinstates the old performance. This is good
because the many-step queries are the one that matter most (longest
absolute runtime).
In terms of allocations, though, this commit doesn't make a dent at
all (numbers not shown). The reason is that most of the allocations
happen in the sampleRingIterator (in the storage package), which has
to be addressed in a separate commit.
Signed-off-by: beorn7 <beorn@grafana.com>
2022-10-28 07:58:40 -07:00
|
|
|
T: 100 * 60 * 1000,
|
|
|
|
H: hist.ToFloat(),
|
2023-01-09 05:05:04 -08:00
|
|
|
Metric: expL,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
require.NoError(t, err)
|
|
|
|
}
|
|
|
|
require.NoError(t, app.Commit())
|
|
|
|
|
|
|
|
h := &Handler{
|
|
|
|
localStorage: &dbAdapter{suite.TSDB()},
|
|
|
|
lookbackDelta: 5 * time.Minute,
|
|
|
|
now: func() model.Time { return 101 * 60 * 1000 }, // 101min after epoch.
|
|
|
|
config: &config.Config{
|
|
|
|
GlobalConfig: config.GlobalConfig{},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
req := httptest.NewRequest("GET", "http://example.org/federate?match[]=test_metric", nil)
|
|
|
|
req.Header.Add("Accept", `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited,application/openmetrics-text;version=1.0.0;q=0.8,application/openmetrics-text;version=0.0.1;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1`)
|
|
|
|
res := httptest.NewRecorder()
|
|
|
|
|
|
|
|
h.federation(res, req)
|
|
|
|
|
|
|
|
require.Equal(t, http.StatusOK, res.Code)
|
|
|
|
body, err := io.ReadAll(res.Body)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
p := textparse.NewProtobufParser(body)
|
|
|
|
var actVec promql.Vector
|
|
|
|
metricFamilies := 0
|
promql: Separate `Point` into `FPoint` and `HPoint`
In other words: Instead of having a “polymorphous” `Point` that can
either contain a float value or a histogram value, use an `FPoint` for
floats and an `HPoint` for histograms.
This seemingly small change has a _lot_ of repercussions throughout
the codebase.
The idea here is to avoid the increase in size of `Point` arrays that
happened after native histograms had been added.
The higher-level data structures (`Sample`, `Series`, etc.) are still
“polymorphous”. The same idea could be applied to them, but at each
step the trade-offs needed to be evaluated.
The idea with this change is to do the minimum necessary to get back
to pre-histogram performance for functions that do not touch
histograms. Here are comparisons for the `changes` function. The test
data doesn't include histograms yet. Ideally, there would be no change
in the benchmark result at all.
First runtime v2.39 compared to directly prior to this commit:
```
name old time/op new time/op delta
RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 542µs ± 1% +38.58% (p=0.000 n=9+8)
RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 617µs ± 2% +36.48% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.36ms ± 2% +21.58% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 8.94ms ± 1% +14.21% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.30ms ± 1% +10.67% (p=0.000 n=9+10)
RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.10ms ± 1% +11.82% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 11.8ms ± 1% +12.50% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 87.4ms ± 1% +12.63% (p=0.000 n=9+9)
RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 32.8ms ± 1% +8.01% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.6ms ± 2% +9.64% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 117ms ± 1% +11.69% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 876ms ± 1% +11.83% (p=0.000 n=9+10)
```
And then runtime v2.39 compared to after this commit:
```
name old time/op new time/op delta
RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 547µs ± 1% +39.84% (p=0.000 n=9+8)
RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 616µs ± 2% +36.15% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.26ms ± 1% +12.20% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 7.95ms ± 1% +1.59% (p=0.000 n=10+8)
RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.38ms ± 2% +13.49% (p=0.000 n=9+10)
RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.02ms ± 1% +9.80% (p=0.000 n=10+9)
RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 10.8ms ± 1% +3.08% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 78.1ms ± 1% +0.58% (p=0.035 n=9+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 33.5ms ± 4% +10.18% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.0ms ± 1% +7.98% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 107ms ± 1% +1.92% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 775ms ± 1% -1.02% (p=0.019 n=9+9)
```
In summary, the runtime doesn't really improve with this change for
queries with just a few steps. For queries with many steps, this
commit essentially reinstates the old performance. This is good
because the many-step queries are the ones that matter most (longest
absolute runtime).
In terms of allocations, though, this commit doesn't make a dent at
all (numbers not shown). The reason is that most of the allocations
happen in the sampleRingIterator (in the storage package), which has
to be addressed in a separate commit.
Signed-off-by: beorn7 <beorn@grafana.com>
2022-10-28 07:58:40 -07:00
|
|
|
l := labels.Labels{}
|
2023-01-09 05:05:04 -08:00
|
|
|
for {
|
|
|
|
et, err := p.Next()
|
|
|
|
if err == io.EOF {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
require.NoError(t, err)
|
|
|
|
if et == textparse.EntryHistogram || et == textparse.EntrySeries {
|
|
|
|
p.Metric(&l)
|
|
|
|
}
|
style: Replace `else if` cascades with `switch`
Wiser coders than myself have come to the conclusion that a `switch`
statement is almost always superior to a statement that includes any
`else if`.
The exceptions that I have found in our codebase are just these two:
* The `if else` is followed by an additional statement before the next
condition (separated by a `;`).
* The whole thing is within a `for` loop and `break` statements are
used. In this case, using `switch` would require tagging the `for`
loop, which probably tips the balance.
Why are `switch` statements more readable?
For one, fewer curly braces. But more importantly, the conditions all
have the same alignment, so the whole thing follows the natural flow
of going down a list of conditions. With `else if`, in contrast, all
conditions but the first are "hidden" behind `} else if `, harder to
spot and (for no good reason) presented differently from the first
condition.
I'm sure the aforementioned wise coders can list even more reasons.
In any case, I like it so much that I have found myself recommending
it in code reviews. I would like to make it a habit in our code base,
without making it a hard requirement that we would test on the CI. But
for that, there has to be a role model, so this commit eliminates all
`if else` occurrences, unless it is autogenerated code or fits one of
the exceptions above.
Signed-off-by: beorn7 <beorn@grafana.com>
2023-04-12 07:14:31 -07:00
|
|
|
switch et {
|
|
|
|
case textparse.EntryHelp:
|
|
|
|
metricFamilies++
|
|
|
|
case textparse.EntryHistogram:
|
2023-01-09 05:05:04 -08:00
|
|
|
_, parsedTimestamp, h, fh := p.Histogram()
|
|
|
|
require.Nil(t, h)
|
promql: Separate `Point` into `FPoint` and `HPoint`
In other words: Instead of having a “polymorphous” `Point` that can
either contain a float value or a histogram value, use an `FPoint` for
floats and an `HPoint` for histograms.
This seemingly small change has a _lot_ of repercussions throughout
the codebase.
The idea here is to avoid the increase in size of `Point` arrays that
happened after native histograms had been added.
The higher-level data structures (`Sample`, `Series`, etc.) are still
“polymorphous”. The same idea could be applied to them, but at each
step the trade-offs needed to be evaluated.
The idea with this change is to do the minimum necessary to get back
to pre-histogram performance for functions that do not touch
histograms. Here are comparisons for the `changes` function. The test
data doesn't include histograms yet. Ideally, there would be no change
in the benchmark result at all.
First runtime v2.39 compared to directly prior to this commit:
```
name old time/op new time/op delta
RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 542µs ± 1% +38.58% (p=0.000 n=9+8)
RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 617µs ± 2% +36.48% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.36ms ± 2% +21.58% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 8.94ms ± 1% +14.21% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.30ms ± 1% +10.67% (p=0.000 n=9+10)
RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.10ms ± 1% +11.82% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 11.8ms ± 1% +12.50% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 87.4ms ± 1% +12.63% (p=0.000 n=9+9)
RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 32.8ms ± 1% +8.01% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.6ms ± 2% +9.64% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 117ms ± 1% +11.69% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 876ms ± 1% +11.83% (p=0.000 n=9+10)
```
And then runtime v2.39 compared to after this commit:
```
name old time/op new time/op delta
RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 547µs ± 1% +39.84% (p=0.000 n=9+8)
RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 616µs ± 2% +36.15% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.26ms ± 1% +12.20% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 7.95ms ± 1% +1.59% (p=0.000 n=10+8)
RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.38ms ± 2% +13.49% (p=0.000 n=9+10)
RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.02ms ± 1% +9.80% (p=0.000 n=10+9)
RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 10.8ms ± 1% +3.08% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 78.1ms ± 1% +0.58% (p=0.035 n=9+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 33.5ms ± 4% +10.18% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.0ms ± 1% +7.98% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 107ms ± 1% +1.92% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 775ms ± 1% -1.02% (p=0.019 n=9+9)
```
In summary, the runtime doesn't really improve with this change for
queries with just a few steps. For queries with many steps, this
commit essentially reinstates the old performance. This is good
because the many-step queries are the ones that matter most (longest
absolute runtime).
In terms of allocations, though, this commit doesn't make a dent at
all (numbers not shown). The reason is that most of the allocations
happen in the sampleRingIterator (in the storage package), which has
to be addressed in a separate commit.
Signed-off-by: beorn7 <beorn@grafana.com>
2022-10-28 07:58:40 -07:00
|
|
|
actVec = append(actVec, promql.Sample{
|
|
|
|
T: *parsedTimestamp,
|
|
|
|
H: fh,
|
|
|
|
Metric: l,
|
|
|
|
})
|
style: Replace `else if` cascades with `switch`
Wiser coders than myself have come to the conclusion that a `switch`
statement is almost always superior to a statement that includes any
`else if`.
The exceptions that I have found in our codebase are just these two:
* The `if else` is followed by an additional statement before the next
condition (separated by a `;`).
* The whole thing is within a `for` loop and `break` statements are
used. In this case, using `switch` would require tagging the `for`
loop, which probably tips the balance.
Why are `switch` statements more readable?
For one, fewer curly braces. But more importantly, the conditions all
have the same alignment, so the whole thing follows the natural flow
of going down a list of conditions. With `else if`, in contrast, all
conditions but the first are "hidden" behind `} else if `, harder to
spot and (for no good reason) presented differently from the first
condition.
I'm sure the aforementioned wise coders can list even more reasons.
In any case, I like it so much that I have found myself recommending
it in code reviews. I would like to make it a habit in our code base,
without making it a hard requirement that we would test on the CI. But
for that, there has to be a role model, so this commit eliminates all
`if else` occurrences, unless it is autogenerated code or fits one of
the exceptions above.
Signed-off-by: beorn7 <beorn@grafana.com>
2023-04-12 07:14:31 -07:00
|
|
|
case textparse.EntrySeries:
|
promql: Separate `Point` into `FPoint` and `HPoint`
In other words: Instead of having a “polymorphous” `Point` that can
either contain a float value or a histogram value, use an `FPoint` for
floats and an `HPoint` for histograms.
This seemingly small change has a _lot_ of repercussions throughout
the codebase.
The idea here is to avoid the increase in size of `Point` arrays that
happened after native histograms had been added.
The higher-level data structures (`Sample`, `Series`, etc.) are still
“polymorphous”. The same idea could be applied to them, but at each
step the trade-offs needed to be evaluated.
The idea with this change is to do the minimum necessary to get back
to pre-histogram performance for functions that do not touch
histograms. Here are comparisons for the `changes` function. The test
data doesn't include histograms yet. Ideally, there would be no change
in the benchmark result at all.
First runtime v2.39 compared to directly prior to this commit:
```
name old time/op new time/op delta
RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 542µs ± 1% +38.58% (p=0.000 n=9+8)
RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 617µs ± 2% +36.48% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.36ms ± 2% +21.58% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 8.94ms ± 1% +14.21% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.30ms ± 1% +10.67% (p=0.000 n=9+10)
RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.10ms ± 1% +11.82% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 11.8ms ± 1% +12.50% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 87.4ms ± 1% +12.63% (p=0.000 n=9+9)
RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 32.8ms ± 1% +8.01% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.6ms ± 2% +9.64% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 117ms ± 1% +11.69% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 876ms ± 1% +11.83% (p=0.000 n=9+10)
```
And then runtime v2.39 compared to after this commit:
```
name old time/op new time/op delta
RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 547µs ± 1% +39.84% (p=0.000 n=9+8)
RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 616µs ± 2% +36.15% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.26ms ± 1% +12.20% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 7.95ms ± 1% +1.59% (p=0.000 n=10+8)
RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.38ms ± 2% +13.49% (p=0.000 n=9+10)
RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.02ms ± 1% +9.80% (p=0.000 n=10+9)
RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 10.8ms ± 1% +3.08% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 78.1ms ± 1% +0.58% (p=0.035 n=9+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 33.5ms ± 4% +10.18% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.0ms ± 1% +7.98% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 107ms ± 1% +1.92% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 775ms ± 1% -1.02% (p=0.019 n=9+9)
```
In summary, the runtime doesn't really improve with this change for
queries with just a few steps. For queries with many steps, this
commit essentially reinstates the old performance. This is good
because the many-step queries are the ones that matter most (longest
absolute runtime).
In terms of allocations, though, this commit doesn't make a dent at
all (numbers not shown). The reason is that most of the allocations
happen in the sampleRingIterator (in the storage package), which has
to be addressed in a separate commit.
Signed-off-by: beorn7 <beorn@grafana.com>
2022-10-28 07:58:40 -07:00
|
|
|
_, parsedTimestamp, f := p.Series()
|
|
|
|
actVec = append(actVec, promql.Sample{
|
|
|
|
T: *parsedTimestamp,
|
|
|
|
F: f,
|
|
|
|
Metric: l,
|
|
|
|
})
|
2023-01-09 05:05:04 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO(codesome): Once PromQL is able to set the CounterResetHint on histograms,
|
|
|
|
// test it with switching histogram types for metric families.
|
|
|
|
require.Equal(t, 4, metricFamilies)
|
|
|
|
require.Equal(t, expVec, actVec)
|
|
|
|
}
|