From c2af0de522b9d89ed681035262be9d6ca0452381 Mon Sep 17 00:00:00 2001 From: songjiayang Date: Sun, 12 Jun 2022 08:06:14 +0800 Subject: [PATCH 01/40] make sure response error when TOC parse failed Signed-off-by: songjiayang --- tsdb/index/index.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 29295c45f5..c81441a6bf 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -174,18 +174,15 @@ func NewTOCFromByteSlice(bs ByteSlice) (*TOC, error) { return nil, errors.Wrap(encoding.ErrInvalidChecksum, "read TOC") } - if err := d.Err(); err != nil { - return nil, err - } - - return &TOC{ + toc := &TOC{ Symbols: d.Be64(), Series: d.Be64(), LabelIndices: d.Be64(), LabelIndicesTable: d.Be64(), Postings: d.Be64(), PostingsTable: d.Be64(), - }, nil + } + return toc, d.Err() } // NewWriter returns a new Writer to the given filename. It serializes data in format version 2. From 00ba2f9a46763389cfea922391c0b1c00cc6baba Mon Sep 17 00:00:00 2001 From: Ashish Kurmi Date: Wed, 7 Sep 2022 21:27:16 -0700 Subject: [PATCH 02/40] ci: add minimum GitHub token permissions for workflows Signed-off-by: Ashish Kurmi --- .github/workflows/buf-lint.yml | 3 +++ .github/workflows/buf.yml | 3 +++ .github/workflows/codeql-analysis.yml | 3 +++ .github/workflows/funcbench.yml | 3 +++ .github/workflows/fuzzing.yml | 3 +++ .github/workflows/repo_sync.yml | 3 +++ 6 files changed, 18 insertions(+) diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index 37756adbfd..bb5d78e5e7 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -4,6 +4,9 @@ on: paths: - ".github/workflows/buf-lint.yml" - "**.proto" +permissions: + contents: read + jobs: buf: name: lint diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index 4fe8c86b3e..ee06981e0f 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -3,6 +3,9 @@ on: push: branches: - main 
+permissions: + contents: read + jobs: buf: name: lint and publish diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 01075f0c22..298c0701af 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -6,6 +6,9 @@ on: schedule: - cron: "26 14 * * 1" +permissions: + contents: read + jobs: analyze: name: Analyze diff --git a/.github/workflows/funcbench.yml b/.github/workflows/funcbench.yml index 6583aa95b9..0826bcabe4 100644 --- a/.github/workflows/funcbench.yml +++ b/.github/workflows/funcbench.yml @@ -2,6 +2,9 @@ on: repository_dispatch: types: [funcbench_start] name: Funcbench Workflow +permissions: + contents: read + jobs: run_funcbench: name: Running funcbench diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml index 87c40d3105..d0751f2fb6 100644 --- a/.github/workflows/fuzzing.yml +++ b/.github/workflows/fuzzing.yml @@ -1,6 +1,9 @@ name: CIFuzz on: workflow_call: +permissions: + contents: read + jobs: Fuzzing: runs-on: ubuntu-latest diff --git a/.github/workflows/repo_sync.yml b/.github/workflows/repo_sync.yml index ca8197878c..392d801b0e 100644 --- a/.github/workflows/repo_sync.yml +++ b/.github/workflows/repo_sync.yml @@ -2,6 +2,9 @@ on: schedule: - cron: '44 17 * * *' +permissions: + contents: read + jobs: repo_sync: runs-on: ubuntu-latest From 3fb881af261361a6083eedb329dd3aef45a377fe Mon Sep 17 00:00:00 2001 From: Dimitar Dimitrov Date: Sun, 11 Sep 2022 11:21:03 +0200 Subject: [PATCH 03/40] Simplify rule group's EvalTimestamp formula I found it hard to understand how EvalTimestamp works, so I wanted to simplify the math there. This PR should be a noop. 
Current formula is: ``` offset = g.hash % g.interval adjNow = startTime - offset base = adjNow - (adjNow % g.interval) EvalTimestamp = base + offset ``` I simplify `EvalTimestamp` ``` EvalTimestamp = base + offset # expand base = adjNow - (adjNow % g.interval) + offset # expand adjNow = startTime - offset - ((startTime - offset) % g.interval) + offset # cancel out offset = startTime - ((startTime - offset) % g.interval) # expand A+B (mod M) = (A (mod M) + B (mod M)) (mod M) = startTime - (startTime % g.interval - offset % g.interval) % g.interval # expand offset = startTime - (startTime % g.interval - ((g.hash % g.interval) % g.interval)) % g.interval # remove redundant mod g.interval = startTime - (startTime % g.interval - g.hash % g.interval) % g.interval # simplify (A (mod M) + B (mod M)) (mod M) = A+B (mod M) = startTime - (startTime - g.hash) % g.interval offset = (startTime - g.hash) % g.interval EvalTimestamp = startTime - offset ``` Signed-off-by: Dimitar Dimitrov --- rules/manager.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/rules/manager.go b/rules/manager.go index 5eed4bfb0f..140fb45b76 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -532,13 +532,9 @@ func (g *Group) setLastEvaluation(ts time.Time) { // EvalTimestamp returns the immediately preceding consistently slotted evaluation time. 
func (g *Group) EvalTimestamp(startTime int64) time.Time { - var ( - offset = int64(g.hash() % uint64(g.interval)) - adjNow = startTime - offset - base = adjNow - (adjNow % int64(g.interval)) - ) + offset := (uint64(startTime) - g.hash()) % uint64(g.interval) - return time.Unix(0, base+offset).UTC() + return time.Unix(0, startTime-int64(offset)).UTC() } func nameAndLabels(rule Rule) string { From ecfaa48a17d4f0f8f35d3b7e19d4b6f969f02c47 Mon Sep 17 00:00:00 2001 From: Ashish Kurmi <100655670+boahc077@users.noreply.github.com> Date: Mon, 19 Sep 2022 01:17:46 -0700 Subject: [PATCH 04/40] Update .github/workflows/codeql-analysis.yml Co-authored-by: Christian Hoffmann Signed-off-by: Ashish Kurmi <100655670+boahc077@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 298c0701af..f0d3c060ea 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -8,6 +8,7 @@ on: permissions: contents: read + security-events: write jobs: analyze: From 28a66e183d4e758e165ac84deb2cc86a5282fb54 Mon Sep 17 00:00:00 2001 From: Douglas Camata <159076+douglascamata@users.noreply.github.com> Date: Fri, 7 Oct 2022 13:15:33 +0200 Subject: [PATCH 05/40] Update relabel.Process comment Signed-off-by: Douglas Camata <159076+douglascamata@users.noreply.github.com> --- model/relabel/relabel.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index e0d7f6ddf5..b4bb999f75 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -188,10 +188,12 @@ func (re Regexp) String() string { return str[4 : len(str)-2] } -// Process returns a relabeled copy of the given label set. The relabel configurations +// Process returns a relabeled version of the given label set. The relabel configurations // are applied in order of input. 
+// There are circumstances where Process will modify the input label. +// If you want to avoid issues with the input label set being modified, at the cost of +// higher memory usage, you can use lbls.Copy(). // If a label set is dropped, nil is returned. -// May return the input labelSet modified. func Process(lbls labels.Labels, cfgs ...*Config) labels.Labels { lb := labels.NewBuilder(nil) for _, cfg := range cfgs { From 03ab8dcca0040674de5c1a6a9caabdf5ab4c1d4b Mon Sep 17 00:00:00 2001 From: Dimitar Dimitrov Date: Wed, 12 Oct 2022 14:12:03 +0200 Subject: [PATCH 06/40] Add comments on EvalTimestamp Signed-off-by: Dimitar Dimitrov --- rules/manager.go | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/rules/manager.go b/rules/manager.go index 140fb45b76..e24eee2c04 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -532,9 +532,28 @@ func (g *Group) setLastEvaluation(ts time.Time) { // EvalTimestamp returns the immediately preceding consistently slotted evaluation time. func (g *Group) EvalTimestamp(startTime int64) time.Time { - offset := (uint64(startTime) - g.hash()) % uint64(g.interval) + var ( + offset = int64(g.hash() % uint64(g.interval)) - return time.Unix(0, startTime-int64(offset)).UTC() + // This group's evaluation times differ from the perfect time intervals by `offset` nanoseconds. + // But we can only use `% interval` to align with the interval. And `% interval` will always + // align with the perfect time intervals, instead of this group's. Because of this we add + // `offset` _after_ aligning with the perfect time interval. + // + // There can be cases where adding `offset` to the perfect evaluation time can yield a + // timestamp in the future, which is not what EvalTimestamp should do. + // So we subtract one `offset` to make sure that `now - (now % interval) + offset` gives an + // evaluation time in the past. + adjNow = startTime - offset + + // Adjust to perfect evaluation intervals. 
+ base = adjNow - (adjNow % int64(g.interval)) + + // Add one offset to randomize the evaluation times of this group. + next = base + offset + ) + + return time.Unix(0, next).UTC() } func nameAndLabels(rule Rule) string { From b6caa6cabf65ccffcf1532d5bb02e7af17371e6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Krupa=20=28paulfantom=29?= Date: Wed, 30 Nov 2022 14:02:54 +0100 Subject: [PATCH 07/40] documentation/mixin: use prometheus metrics for dashboard variables MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Paweł Krupa (paulfantom) --- documentation/prometheus-mixin/dashboards.libsonnet | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation/prometheus-mixin/dashboards.libsonnet b/documentation/prometheus-mixin/dashboards.libsonnet index b95f13e0a0..b6e295a963 100644 --- a/documentation/prometheus-mixin/dashboards.libsonnet +++ b/documentation/prometheus-mixin/dashboards.libsonnet @@ -314,7 +314,7 @@ local template = grafana.template; template.new( 'cluster', '$datasource', - 'label_values(kube_pod_container_info{image=~".*prometheus.*"}, cluster)' % $._config, + 'label_values(prometheus_build_info, cluster)' % $._config, refresh='time', current={ selected: true, From 3e94dd8c8f83a11c4ac30e5a748b96adbc85cb40 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 25 Jan 2023 14:30:47 +1100 Subject: [PATCH 08/40] Add extension point for returning different content types from API endpoints Signed-off-by: Charles Korn --- web/api/v1/api.go | 64 ++++++-- web/api/v1/api_test.go | 290 ++++++++++++---------------------- web/api/v1/codec.go | 26 +++ web/api/v1/json_codec.go | 32 ++++ web/api/v1/json_codec_test.go | 178 +++++++++++++++++++++ 5 files changed, 391 insertions(+), 199 deletions(-) create mode 100644 web/api/v1/codec.go create mode 100644 web/api/v1/json_codec.go create mode 100644 web/api/v1/json_codec_test.go diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 
894a8666a6..525bc446ae 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -80,6 +80,8 @@ const ( var LocalhostRepresentations = []string{"127.0.0.1", "localhost", "::1"} +var defaultCodec = JSONCodec{} + type apiError struct { typ errorType err error @@ -145,7 +147,8 @@ type RuntimeInfo struct { StorageRetention string `json:"storageRetention"` } -type response struct { +// Response contains a response to a HTTP API request. +type Response struct { Status status `json:"status"` Data interface{} `json:"data,omitempty"` ErrorType errorType `json:"errorType,omitempty"` @@ -208,6 +211,8 @@ type API struct { remoteWriteHandler http.Handler remoteReadHandler http.Handler + + codecs map[string]Codec } func init() { @@ -273,8 +278,12 @@ func NewAPI( statsRenderer: defaultStatsRenderer, remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame), + + codecs: map[string]Codec{}, } + a.InstallCodec(defaultCodec) + if statsRenderer != nil { a.statsRenderer = statsRenderer } @@ -286,6 +295,16 @@ func NewAPI( return a } +// InstallCodec adds codec to this API's available codecs. +// If codec handles a content type handled by a codec already installed in this API, codec replaces the previous codec. 
+func (api *API) InstallCodec(codec Codec) { + if api.codecs == nil { + api.codecs = map[string]Codec{} + } + + api.codecs[codec.ContentType()] = codec +} + func setUnavailStatusOnTSDBNotReady(r apiFuncResult) apiFuncResult { if r.err != nil && errors.Cause(r.err.err) == tsdb.ErrNotReady { r.err.typ = errorUnavailable @@ -308,7 +327,7 @@ func (api *API) Register(r *route.Router) { } if result.data != nil { - api.respond(w, result.data, result.warnings) + api.respond(w, r, result.data, result.warnings) return } w.WriteHeader(http.StatusNoContent) @@ -1446,7 +1465,7 @@ func (api *API) serveWALReplayStatus(w http.ResponseWriter, r *http.Request) { if err != nil { api.respondError(w, &apiError{errorInternal, err}, nil) } - api.respond(w, walReplayStatus{ + api.respond(w, r, walReplayStatus{ Min: status.Min, Max: status.Max, Current: status.Current, @@ -1548,34 +1567,59 @@ func (api *API) cleanTombstones(r *http.Request) apiFuncResult { return apiFuncResult{nil, nil, nil, nil} } -func (api *API) respond(w http.ResponseWriter, data interface{}, warnings storage.Warnings) { +func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface{}, warnings storage.Warnings) { statusMessage := statusSuccess var warningStrings []string for _, warning := range warnings { warningStrings = append(warningStrings, warning.Error()) } - json := jsoniter.ConfigCompatibleWithStandardLibrary - b, err := json.Marshal(&response{ + + resp := &Response{ Status: statusMessage, Data: data, Warnings: warningStrings, - }) + } + + codec := api.negotiateCodec(req, resp) + b, err := codec.Encode(resp) if err != nil { - level.Error(api.logger).Log("msg", "error marshaling json response", "err", err) + level.Error(api.logger).Log("msg", "error marshaling response", "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", codec.ContentType()) w.WriteHeader(http.StatusOK) if n, err 
:= w.Write(b); err != nil { level.Error(api.logger).Log("msg", "error writing response", "bytesWritten", n, "err", err) } } +// HTTP content negotiation is hard (see https://developer.mozilla.org/en-US/docs/Web/HTTP/Content_negotiation). +// Ideally, we shouldn't be implementing this ourselves - https://github.com/golang/go/issues/19307 is an open proposal to add +// this to the Go stdlib and has links to a number of other implementations. +// +// This is an MVP, and doesn't support features like wildcards or weighting. +func (api *API) negotiateCodec(req *http.Request, resp *Response) Codec { + acceptHeader := req.Header.Get("Accept") + if acceptHeader == "" { + return defaultCodec + } + + for _, contentType := range strings.Split(acceptHeader, ",") { + codec, ok := api.codecs[strings.TrimSpace(contentType)] + if ok && codec.CanEncode(resp) { + return codec + } + } + + level.Warn(api.logger).Log("msg", "could not find suitable codec for response, falling back to default codec", "accept_header", acceptHeader) + return defaultCodec +} + func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data interface{}) { json := jsoniter.ConfigCompatibleWithStandardLibrary - b, err := json.Marshal(&response{ + b, err := json.Marshal(&Response{ Status: statusError, ErrorType: apiErr.typ, Error: apiErr.err.Error(), diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 7e2dcbd8bb..617a8bdf3c 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -18,7 +18,6 @@ import ( "encoding/json" "fmt" "io" - "math" "net/http" "net/http/httptest" "net/url" @@ -30,7 +29,6 @@ import ( "testing" "time" - "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/util/stats" @@ -2765,39 +2763,93 @@ func TestAdminEndpoints(t *testing.T) { } func TestRespondSuccess(t *testing.T) { + api := API{ + logger: log.NewNopLogger(), + } + + api.InstallCodec(&testCodec{contentType: 
"test/cannot-encode", canEncode: false}) + api.InstallCodec(&testCodec{contentType: "test/can-encode", canEncode: true}) + api.InstallCodec(&testCodec{contentType: "test/can-encode-2", canEncode: true}) + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - api := API{} - api.respond(w, "test", nil) + api.respond(w, r, "test", nil) })) defer s.Close() - resp, err := http.Get(s.URL) - if err != nil { - t.Fatalf("Error on test request: %s", err) - } - body, err := io.ReadAll(resp.Body) - defer resp.Body.Close() - if err != nil { - t.Fatalf("Error reading response body: %s", err) - } + for _, tc := range []struct { + name string + acceptHeader string + expectedContentType string + expectedBody string + }{ + { + name: "no Accept header", + expectedContentType: "application/json", + expectedBody: `{"status":"success","data":"test"}`, + }, + { + name: "Accept header with single content type which is suitable", + acceptHeader: "test/can-encode", + expectedContentType: "test/can-encode", + expectedBody: `response from test/can-encode codec`, + }, + { + name: "Accept header with single content type which is not available", + acceptHeader: "test/not-registered", + expectedContentType: "application/json", + expectedBody: `{"status":"success","data":"test"}`, + }, + { + name: "Accept header with single content type which cannot encode the response payload", + acceptHeader: "test/cannot-encode", + expectedContentType: "application/json", + expectedBody: `{"status":"success","data":"test"}`, + }, + { + name: "Accept header with multiple content types, all of which are suitable", + acceptHeader: "test/can-encode, test/can-encode-2", + expectedContentType: "test/can-encode", + expectedBody: `response from test/can-encode codec`, + }, + { + name: "Accept header with multiple content types, only one of which is available", + acceptHeader: "test/not-registered, test/can-encode", + expectedContentType: "test/can-encode", + expectedBody: `response 
from test/can-encode codec`, + }, + { + name: "Accept header with multiple content types, only one of which can encode the response payload", + acceptHeader: "test/cannot-encode, test/can-encode", + expectedContentType: "test/can-encode", + expectedBody: `response from test/can-encode codec`, + }, + { + name: "Accept header with multiple content types, none of which are available", + acceptHeader: "test/not-registered, test/also-not-registered", + expectedContentType: "application/json", + expectedBody: `{"status":"success","data":"test"}`, + }, + } { + t.Run(tc.name, func(t *testing.T) { + req, err := http.NewRequest(http.MethodGet, s.URL, nil) + require.NoError(t, err) - if resp.StatusCode != 200 { - t.Fatalf("Return code %d expected in success response but got %d", 200, resp.StatusCode) - } - if h := resp.Header.Get("Content-Type"); h != "application/json" { - t.Fatalf("Expected Content-Type %q but got %q", "application/json", h) - } + if tc.acceptHeader != "" { + req.Header.Set("Accept", tc.acceptHeader) + } - var res response - if err = json.Unmarshal([]byte(body), &res); err != nil { - t.Fatalf("Error unmarshaling JSON body: %s", err) - } + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) - exp := &response{ - Status: statusSuccess, - Data: "test", + body, err := io.ReadAll(resp.Body) + defer resp.Body.Close() + require.NoError(t, err) + + require.Equal(t, http.StatusOK, resp.StatusCode) + require.Equal(t, tc.expectedContentType, resp.Header.Get("Content-Type")) + require.Equal(t, tc.expectedBody, string(body)) + }) } - require.Equal(t, exp, &res) } func TestRespondError(t *testing.T) { @@ -2824,12 +2876,12 @@ func TestRespondError(t *testing.T) { t.Fatalf("Expected Content-Type %q but got %q", "application/json", h) } - var res response + var res Response if err = json.Unmarshal([]byte(body), &res); err != nil { t.Fatalf("Error unmarshaling JSON body: %s", err) } - exp := &response{ + exp := &Response{ Status: statusError, Data: "test", 
ErrorType: errorTimeout, @@ -3047,165 +3099,6 @@ func TestOptionsMethod(t *testing.T) { } } -func TestRespond(t *testing.T) { - cases := []struct { - response interface{} - expected string - }{ - { - response: &queryData{ - ResultType: parser.ValueTypeMatrix, - Result: promql.Matrix{ - promql.Series{ - Points: []promql.Point{{V: 1, T: 1000}}, - Metric: labels.FromStrings("__name__", "foo"), - }, - }, - }, - expected: `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"foo"},"values":[[1,"1"]]}]}}`, - }, - { - response: &queryData{ - ResultType: parser.ValueTypeMatrix, - Result: promql.Matrix{ - promql.Series{ - Points: []promql.Point{{H: &histogram.FloatHistogram{ - Schema: 2, - ZeroThreshold: 0.001, - ZeroCount: 12, - Count: 10, - Sum: 20, - PositiveSpans: []histogram.Span{ - {Offset: 3, Length: 2}, - {Offset: 1, Length: 3}, - }, - NegativeSpans: []histogram.Span{ - {Offset: 2, Length: 2}, - }, - PositiveBuckets: []float64{1, 2, 2, 1, 1}, - NegativeBuckets: []float64{2, 1}, - }, T: 1000}}, - Metric: labels.FromStrings("__name__", "foo"), - }, - }, - }, - expected: `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"foo"},"histograms":[[1,{"count":"10","sum":"20","buckets":[[1,"-1.6817928305074288","-1.414213562373095","1"],[1,"-1.414213562373095","-1.189207115002721","2"],[3,"-0.001","0.001","12"],[0,"1.414213562373095","1.6817928305074288","1"],[0,"1.6817928305074288","2","2"],[0,"2.378414230005442","2.82842712474619","2"],[0,"2.82842712474619","3.3635856610148576","1"],[0,"3.3635856610148576","4","1"]]}]]}]}}`, - }, - { - response: promql.Point{V: 0, T: 0}, - expected: `{"status":"success","data":[0,"0"]}`, - }, - { - response: promql.Point{V: 20, T: 1}, - expected: `{"status":"success","data":[0.001,"20"]}`, - }, - { - response: promql.Point{V: 20, T: 10}, - expected: `{"status":"success","data":[0.010,"20"]}`, - }, - { - response: promql.Point{V: 20, T: 100}, - expected: 
`{"status":"success","data":[0.100,"20"]}`, - }, - { - response: promql.Point{V: 20, T: 1001}, - expected: `{"status":"success","data":[1.001,"20"]}`, - }, - { - response: promql.Point{V: 20, T: 1010}, - expected: `{"status":"success","data":[1.010,"20"]}`, - }, - { - response: promql.Point{V: 20, T: 1100}, - expected: `{"status":"success","data":[1.100,"20"]}`, - }, - { - response: promql.Point{V: 20, T: 12345678123456555}, - expected: `{"status":"success","data":[12345678123456.555,"20"]}`, - }, - { - response: promql.Point{V: 20, T: -1}, - expected: `{"status":"success","data":[-0.001,"20"]}`, - }, - { - response: promql.Point{V: math.NaN(), T: 0}, - expected: `{"status":"success","data":[0,"NaN"]}`, - }, - { - response: promql.Point{V: math.Inf(1), T: 0}, - expected: `{"status":"success","data":[0,"+Inf"]}`, - }, - { - response: promql.Point{V: math.Inf(-1), T: 0}, - expected: `{"status":"success","data":[0,"-Inf"]}`, - }, - { - response: promql.Point{V: 1.2345678e6, T: 0}, - expected: `{"status":"success","data":[0,"1234567.8"]}`, - }, - { - response: promql.Point{V: 1.2345678e-6, T: 0}, - expected: `{"status":"success","data":[0,"0.0000012345678"]}`, - }, - { - response: promql.Point{V: 1.2345678e-67, T: 0}, - expected: `{"status":"success","data":[0,"1.2345678e-67"]}`, - }, - { - response: []exemplar.QueryResult{ - { - SeriesLabels: labels.FromStrings("foo", "bar"), - Exemplars: []exemplar.Exemplar{ - { - Labels: labels.FromStrings("traceID", "abc"), - Value: 100.123, - Ts: 1234, - }, - }, - }, - }, - expected: `{"status":"success","data":[{"seriesLabels":{"foo":"bar"},"exemplars":[{"labels":{"traceID":"abc"},"value":"100.123","timestamp":1.234}]}]}`, - }, - { - response: []exemplar.QueryResult{ - { - SeriesLabels: labels.FromStrings("foo", "bar"), - Exemplars: []exemplar.Exemplar{ - { - Labels: labels.FromStrings("traceID", "abc"), - Value: math.Inf(1), - Ts: 1234, - }, - }, - }, - }, - expected: 
`{"status":"success","data":[{"seriesLabels":{"foo":"bar"},"exemplars":[{"labels":{"traceID":"abc"},"value":"+Inf","timestamp":1.234}]}]}`, - }, - } - - for _, c := range cases { - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - api := API{} - api.respond(w, c.response, nil) - })) - defer s.Close() - - resp, err := http.Get(s.URL) - if err != nil { - t.Fatalf("Error on test request: %s", err) - } - body, err := io.ReadAll(resp.Body) - defer resp.Body.Close() - if err != nil { - t.Fatalf("Error reading response body: %s", err) - } - - if string(body) != c.expected { - t.Fatalf("Expected response \n%v\n but got \n%v\n", c.expected, string(body)) - } - } -} - func TestTSDBStatus(t *testing.T) { tsdb := &fakeDB{} tsdbStatusAPI := func(api *API) apiFunc { return api.serveTSDBStatus } @@ -3281,6 +3174,8 @@ var testResponseWriter = httptest.ResponseRecorder{} func BenchmarkRespond(b *testing.B) { b.ReportAllocs() + request, err := http.NewRequest(http.MethodGet, "/does-not-matter", nil) + require.NoError(b, err) points := []promql.Point{} for i := 0; i < 10000; i++ { points = append(points, promql.Point{V: float64(i * 1000000), T: int64(i)}) @@ -3297,7 +3192,7 @@ func BenchmarkRespond(b *testing.B) { b.ResetTimer() api := API{} for n := 0; n < b.N; n++ { - api.respond(&testResponseWriter, response, nil) + api.respond(&testResponseWriter, request, response, nil) } } @@ -3408,3 +3303,20 @@ func TestGetGlobalURL(t *testing.T) { }) } } + +type testCodec struct { + contentType string + canEncode bool +} + +func (t *testCodec) ContentType() string { + return t.contentType +} + +func (t *testCodec) CanEncode(_ *Response) bool { + return t.canEncode +} + +func (t *testCodec) Encode(_ *Response) ([]byte, error) { + return []byte(fmt.Sprintf("response from %v codec", t.contentType)), nil +} diff --git a/web/api/v1/codec.go b/web/api/v1/codec.go new file mode 100644 index 0000000000..d11bb1fa01 --- /dev/null +++ b/web/api/v1/codec.go @@ -0,0 
+1,26 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +// A Codec performs encoding of API responses. +type Codec interface { + // ContentType returns the MIME time that this Codec emits. + ContentType() string + + // CanEncode determines if this Codec can encode resp. + CanEncode(resp *Response) bool + + // Encode encodes resp, ready for transmission to an API consumer. + Encode(resp *Response) ([]byte, error) +} diff --git a/web/api/v1/json_codec.go b/web/api/v1/json_codec.go new file mode 100644 index 0000000000..b38dab0385 --- /dev/null +++ b/web/api/v1/json_codec.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import jsoniter "github.com/json-iterator/go" + +// JSONCodec is a Codec that encodes API responses as JSON. 
+type JSONCodec struct{} + +func (j JSONCodec) ContentType() string { + return "application/json" +} + +func (j JSONCodec) CanEncode(_ *Response) bool { + return true +} + +func (j JSONCodec) Encode(resp *Response) ([]byte, error) { + json := jsoniter.ConfigCompatibleWithStandardLibrary + return json.Marshal(resp) +} diff --git a/web/api/v1/json_codec_test.go b/web/api/v1/json_codec_test.go new file mode 100644 index 0000000000..c5b030ff9a --- /dev/null +++ b/web/api/v1/json_codec_test.go @@ -0,0 +1,178 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1 + +import ( + "math" + "testing" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/parser" +) + +func TestJsonCodec_Encode(t *testing.T) { + cases := []struct { + response interface{} + expected string + }{ + { + response: &queryData{ + ResultType: parser.ValueTypeMatrix, + Result: promql.Matrix{ + promql.Series{ + Points: []promql.Point{{V: 1, T: 1000}}, + Metric: labels.FromStrings("__name__", "foo"), + }, + }, + }, + expected: `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"foo"},"values":[[1,"1"]]}]}}`, + }, + { + response: &queryData{ + ResultType: parser.ValueTypeMatrix, + Result: promql.Matrix{ + promql.Series{ + Points: []promql.Point{{H: &histogram.FloatHistogram{ + Schema: 2, + ZeroThreshold: 0.001, + ZeroCount: 12, + Count: 10, + Sum: 20, + PositiveSpans: []histogram.Span{ + {Offset: 3, Length: 2}, + {Offset: 1, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: 2, Length: 2}, + }, + PositiveBuckets: []float64{1, 2, 2, 1, 1}, + NegativeBuckets: []float64{2, 1}, + }, T: 1000}}, + Metric: labels.FromStrings("__name__", "foo"), + }, + }, + }, + expected: `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"foo"},"histograms":[[1,{"count":"10","sum":"20","buckets":[[1,"-1.6817928305074288","-1.414213562373095","1"],[1,"-1.414213562373095","-1.189207115002721","2"],[3,"-0.001","0.001","12"],[0,"1.414213562373095","1.6817928305074288","1"],[0,"1.6817928305074288","2","2"],[0,"2.378414230005442","2.82842712474619","2"],[0,"2.82842712474619","3.3635856610148576","1"],[0,"3.3635856610148576","4","1"]]}]]}]}}`, + }, + { + response: promql.Point{V: 0, T: 0}, + expected: `{"status":"success","data":[0,"0"]}`, + }, + { + response: promql.Point{V: 20, T: 1}, + expected: 
`{"status":"success","data":[0.001,"20"]}`, + }, + { + response: promql.Point{V: 20, T: 10}, + expected: `{"status":"success","data":[0.010,"20"]}`, + }, + { + response: promql.Point{V: 20, T: 100}, + expected: `{"status":"success","data":[0.100,"20"]}`, + }, + { + response: promql.Point{V: 20, T: 1001}, + expected: `{"status":"success","data":[1.001,"20"]}`, + }, + { + response: promql.Point{V: 20, T: 1010}, + expected: `{"status":"success","data":[1.010,"20"]}`, + }, + { + response: promql.Point{V: 20, T: 1100}, + expected: `{"status":"success","data":[1.100,"20"]}`, + }, + { + response: promql.Point{V: 20, T: 12345678123456555}, + expected: `{"status":"success","data":[12345678123456.555,"20"]}`, + }, + { + response: promql.Point{V: 20, T: -1}, + expected: `{"status":"success","data":[-0.001,"20"]}`, + }, + { + response: promql.Point{V: math.NaN(), T: 0}, + expected: `{"status":"success","data":[0,"NaN"]}`, + }, + { + response: promql.Point{V: math.Inf(1), T: 0}, + expected: `{"status":"success","data":[0,"+Inf"]}`, + }, + { + response: promql.Point{V: math.Inf(-1), T: 0}, + expected: `{"status":"success","data":[0,"-Inf"]}`, + }, + { + response: promql.Point{V: 1.2345678e6, T: 0}, + expected: `{"status":"success","data":[0,"1234567.8"]}`, + }, + { + response: promql.Point{V: 1.2345678e-6, T: 0}, + expected: `{"status":"success","data":[0,"0.0000012345678"]}`, + }, + { + response: promql.Point{V: 1.2345678e-67, T: 0}, + expected: `{"status":"success","data":[0,"1.2345678e-67"]}`, + }, + { + response: []exemplar.QueryResult{ + { + SeriesLabels: labels.FromStrings("foo", "bar"), + Exemplars: []exemplar.Exemplar{ + { + Labels: labels.FromStrings("traceID", "abc"), + Value: 100.123, + Ts: 1234, + }, + }, + }, + }, + expected: `{"status":"success","data":[{"seriesLabels":{"foo":"bar"},"exemplars":[{"labels":{"traceID":"abc"},"value":"100.123","timestamp":1.234}]}]}`, + }, + { + response: []exemplar.QueryResult{ + { + SeriesLabels: labels.FromStrings("foo", "bar"), + 
Exemplars: []exemplar.Exemplar{ + { + Labels: labels.FromStrings("traceID", "abc"), + Value: math.Inf(1), + Ts: 1234, + }, + }, + }, + }, + expected: `{"status":"success","data":[{"seriesLabels":{"foo":"bar"},"exemplars":[{"labels":{"traceID":"abc"},"value":"+Inf","timestamp":1.234}]}]}`, + }, + } + + codec := JSONCodec{} + + for _, c := range cases { + body, err := codec.Encode(&Response{ + Status: statusSuccess, + Data: c.response, + }) + if err != nil { + t.Fatalf("Error encoding response body: %s", err) + } + + if string(body) != c.expected { + t.Fatalf("Expected response \n%v\n but got \n%v\n", c.expected, string(body)) + } + } +} From a0dd1468be609a0b4933ec094777f36e8d743e6c Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Tue, 31 Jan 2023 14:53:20 +1100 Subject: [PATCH 09/40] Move custom jsoniter code into json_codec.go. Signed-off-by: Charles Korn --- web/api/v1/api.go | 255 ------------------------------------- web/api/v1/json_codec.go | 262 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 261 insertions(+), 256 deletions(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 525bc446ae..e0ad76c5cb 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -27,7 +27,6 @@ import ( "strconv" "strings" "time" - "unsafe" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -40,8 +39,6 @@ import ( "golang.org/x/exp/slices" "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/model/exemplar" - "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/model/timestamp" @@ -54,7 +51,6 @@ import ( "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/index" "github.com/prometheus/prometheus/util/httputil" - "github.com/prometheus/prometheus/util/jsonutil" "github.com/prometheus/prometheus/util/stats" ) @@ -215,13 +211,6 @@ type API struct { codecs map[string]Codec } -func 
init() { - jsoniter.RegisterTypeEncoderFunc("promql.Series", marshalSeriesJSON, marshalSeriesJSONIsEmpty) - jsoniter.RegisterTypeEncoderFunc("promql.Sample", marshalSampleJSON, marshalSampleJSONIsEmpty) - jsoniter.RegisterTypeEncoderFunc("promql.Point", marshalPointJSON, marshalPointJSONIsEmpty) - jsoniter.RegisterTypeEncoderFunc("exemplar.Exemplar", marshalExemplarJSON, marshalExemplarJSONEmpty) -} - // NewAPI returns an initialized API type. func NewAPI( qe QueryEngine, @@ -1724,247 +1713,3 @@ OUTER: } return matcherSets, nil } - -// marshalSeriesJSON writes something like the following: -// -// { -// "metric" : { -// "__name__" : "up", -// "job" : "prometheus", -// "instance" : "localhost:9090" -// }, -// "values": [ -// [ 1435781451.781, "1" ], -// < more values> -// ], -// "histograms": [ -// [ 1435781451.781, { < histogram, see below > } ], -// < more histograms > -// ], -// }, -func marshalSeriesJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { - s := *((*promql.Series)(ptr)) - stream.WriteObjectStart() - stream.WriteObjectField(`metric`) - m, err := s.Metric.MarshalJSON() - if err != nil { - stream.Error = err - return - } - stream.SetBuffer(append(stream.Buffer(), m...)) - - // We make two passes through the series here: In the first marshaling - // all value points, in the second marshaling all histogram - // points. That's probably cheaper than just one pass in which we copy - // out histogram Points into a newly allocated slice for separate - // marshaling. (Could be benchmarked, though.) 
- var foundValue, foundHistogram bool - for _, p := range s.Points { - if p.H == nil { - stream.WriteMore() - if !foundValue { - stream.WriteObjectField(`values`) - stream.WriteArrayStart() - } - foundValue = true - marshalPointJSON(unsafe.Pointer(&p), stream) - } else { - foundHistogram = true - } - } - if foundValue { - stream.WriteArrayEnd() - } - if foundHistogram { - firstHistogram := true - for _, p := range s.Points { - if p.H != nil { - stream.WriteMore() - if firstHistogram { - stream.WriteObjectField(`histograms`) - stream.WriteArrayStart() - } - firstHistogram = false - marshalPointJSON(unsafe.Pointer(&p), stream) - } - } - stream.WriteArrayEnd() - } - stream.WriteObjectEnd() -} - -func marshalSeriesJSONIsEmpty(ptr unsafe.Pointer) bool { - return false -} - -// marshalSampleJSON writes something like the following for normal value samples: -// -// { -// "metric" : { -// "__name__" : "up", -// "job" : "prometheus", -// "instance" : "localhost:9090" -// }, -// "value": [ 1435781451.781, "1" ] -// }, -// -// For histogram samples, it writes something like this: -// -// { -// "metric" : { -// "__name__" : "up", -// "job" : "prometheus", -// "instance" : "localhost:9090" -// }, -// "histogram": [ 1435781451.781, { < histogram, see below > } ] -// }, -func marshalSampleJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { - s := *((*promql.Sample)(ptr)) - stream.WriteObjectStart() - stream.WriteObjectField(`metric`) - m, err := s.Metric.MarshalJSON() - if err != nil { - stream.Error = err - return - } - stream.SetBuffer(append(stream.Buffer(), m...)) - stream.WriteMore() - if s.Point.H == nil { - stream.WriteObjectField(`value`) - } else { - stream.WriteObjectField(`histogram`) - } - marshalPointJSON(unsafe.Pointer(&s.Point), stream) - stream.WriteObjectEnd() -} - -func marshalSampleJSONIsEmpty(ptr unsafe.Pointer) bool { - return false -} - -// marshalPointJSON writes `[ts, "val"]`. 
-func marshalPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { - p := *((*promql.Point)(ptr)) - stream.WriteArrayStart() - jsonutil.MarshalTimestamp(p.T, stream) - stream.WriteMore() - if p.H == nil { - jsonutil.MarshalValue(p.V, stream) - } else { - marshalHistogram(p.H, stream) - } - stream.WriteArrayEnd() -} - -func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool { - return false -} - -// marshalHistogramJSON writes something like: -// -// { -// "count": "42", -// "sum": "34593.34", -// "buckets": [ -// [ 3, "-0.25", "0.25", "3"], -// [ 0, "0.25", "0.5", "12"], -// [ 0, "0.5", "1", "21"], -// [ 0, "2", "4", "6"] -// ] -// } -// -// The 1st element in each bucket array determines if the boundaries are -// inclusive (AKA closed) or exclusive (AKA open): -// -// 0: lower exclusive, upper inclusive -// 1: lower inclusive, upper exclusive -// 2: both exclusive -// 3: both inclusive -// -// The 2nd and 3rd elements are the lower and upper boundary. The 4th element is -// the bucket count. -func marshalHistogram(h *histogram.FloatHistogram, stream *jsoniter.Stream) { - stream.WriteObjectStart() - stream.WriteObjectField(`count`) - jsonutil.MarshalValue(h.Count, stream) - stream.WriteMore() - stream.WriteObjectField(`sum`) - jsonutil.MarshalValue(h.Sum, stream) - - bucketFound := false - it := h.AllBucketIterator() - for it.Next() { - bucket := it.At() - if bucket.Count == 0 { - continue // No need to expose empty buckets in JSON. - } - stream.WriteMore() - if !bucketFound { - stream.WriteObjectField(`buckets`) - stream.WriteArrayStart() - } - bucketFound = true - boundaries := 2 // Exclusive on both sides AKA open interval. - if bucket.LowerInclusive { - if bucket.UpperInclusive { - boundaries = 3 // Inclusive on both sides AKA closed interval. - } else { - boundaries = 1 // Inclusive only on lower end AKA right open. - } - } else { - if bucket.UpperInclusive { - boundaries = 0 // Inclusive only on upper end AKA left open. 
- } - } - stream.WriteArrayStart() - stream.WriteInt(boundaries) - stream.WriteMore() - jsonutil.MarshalValue(bucket.Lower, stream) - stream.WriteMore() - jsonutil.MarshalValue(bucket.Upper, stream) - stream.WriteMore() - jsonutil.MarshalValue(bucket.Count, stream) - stream.WriteArrayEnd() - } - if bucketFound { - stream.WriteArrayEnd() - } - stream.WriteObjectEnd() -} - -// marshalExemplarJSON writes. -// -// { -// labels: , -// value: "", -// timestamp: -// } -func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { - p := *((*exemplar.Exemplar)(ptr)) - stream.WriteObjectStart() - - // "labels" key. - stream.WriteObjectField(`labels`) - lbls, err := p.Labels.MarshalJSON() - if err != nil { - stream.Error = err - return - } - stream.SetBuffer(append(stream.Buffer(), lbls...)) - - // "value" key. - stream.WriteMore() - stream.WriteObjectField(`value`) - jsonutil.MarshalValue(p.Value, stream) - - // "timestamp" key. - stream.WriteMore() - stream.WriteObjectField(`timestamp`) - jsonutil.MarshalTimestamp(p.Ts, stream) - - stream.WriteObjectEnd() -} - -func marshalExemplarJSONEmpty(ptr unsafe.Pointer) bool { - return false -} diff --git a/web/api/v1/json_codec.go b/web/api/v1/json_codec.go index b38dab0385..455af717d0 100644 --- a/web/api/v1/json_codec.go +++ b/web/api/v1/json_codec.go @@ -13,7 +13,23 @@ package v1 -import jsoniter "github.com/json-iterator/go" +import ( + "unsafe" + + jsoniter "github.com/json-iterator/go" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/util/jsonutil" +) + +func init() { + jsoniter.RegisterTypeEncoderFunc("promql.Series", marshalSeriesJSON, marshalSeriesJSONIsEmpty) + jsoniter.RegisterTypeEncoderFunc("promql.Sample", marshalSampleJSON, marshalSampleJSONIsEmpty) + jsoniter.RegisterTypeEncoderFunc("promql.Point", marshalPointJSON, marshalPointJSONIsEmpty) + 
jsoniter.RegisterTypeEncoderFunc("exemplar.Exemplar", marshalExemplarJSON, marshalExemplarJSONEmpty) +} // JSONCodec is a Codec that encodes API responses as JSON. type JSONCodec struct{} @@ -30,3 +46,247 @@ func (j JSONCodec) Encode(resp *Response) ([]byte, error) { json := jsoniter.ConfigCompatibleWithStandardLibrary return json.Marshal(resp) } + +// marshalSeriesJSON writes something like the following: +// +// { +// "metric" : { +// "__name__" : "up", +// "job" : "prometheus", +// "instance" : "localhost:9090" +// }, +// "values": [ +// [ 1435781451.781, "1" ], +// < more values> +// ], +// "histograms": [ +// [ 1435781451.781, { < histogram, see below > } ], +// < more histograms > +// ], +// }, +func marshalSeriesJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { + s := *((*promql.Series)(ptr)) + stream.WriteObjectStart() + stream.WriteObjectField(`metric`) + m, err := s.Metric.MarshalJSON() + if err != nil { + stream.Error = err + return + } + stream.SetBuffer(append(stream.Buffer(), m...)) + + // We make two passes through the series here: In the first marshaling + // all value points, in the second marshaling all histogram + // points. That's probably cheaper than just one pass in which we copy + // out histogram Points into a newly allocated slice for separate + // marshaling. (Could be benchmarked, though.) 
+ var foundValue, foundHistogram bool + for _, p := range s.Points { + if p.H == nil { + stream.WriteMore() + if !foundValue { + stream.WriteObjectField(`values`) + stream.WriteArrayStart() + } + foundValue = true + marshalPointJSON(unsafe.Pointer(&p), stream) + } else { + foundHistogram = true + } + } + if foundValue { + stream.WriteArrayEnd() + } + if foundHistogram { + firstHistogram := true + for _, p := range s.Points { + if p.H != nil { + stream.WriteMore() + if firstHistogram { + stream.WriteObjectField(`histograms`) + stream.WriteArrayStart() + } + firstHistogram = false + marshalPointJSON(unsafe.Pointer(&p), stream) + } + } + stream.WriteArrayEnd() + } + stream.WriteObjectEnd() +} + +func marshalSeriesJSONIsEmpty(ptr unsafe.Pointer) bool { + return false +} + +// marshalSampleJSON writes something like the following for normal value samples: +// +// { +// "metric" : { +// "__name__" : "up", +// "job" : "prometheus", +// "instance" : "localhost:9090" +// }, +// "value": [ 1435781451.781, "1" ] +// }, +// +// For histogram samples, it writes something like this: +// +// { +// "metric" : { +// "__name__" : "up", +// "job" : "prometheus", +// "instance" : "localhost:9090" +// }, +// "histogram": [ 1435781451.781, { < histogram, see below > } ] +// }, +func marshalSampleJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { + s := *((*promql.Sample)(ptr)) + stream.WriteObjectStart() + stream.WriteObjectField(`metric`) + m, err := s.Metric.MarshalJSON() + if err != nil { + stream.Error = err + return + } + stream.SetBuffer(append(stream.Buffer(), m...)) + stream.WriteMore() + if s.Point.H == nil { + stream.WriteObjectField(`value`) + } else { + stream.WriteObjectField(`histogram`) + } + marshalPointJSON(unsafe.Pointer(&s.Point), stream) + stream.WriteObjectEnd() +} + +func marshalSampleJSONIsEmpty(ptr unsafe.Pointer) bool { + return false +} + +// marshalPointJSON writes `[ts, "val"]`. 
+func marshalPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { + p := *((*promql.Point)(ptr)) + stream.WriteArrayStart() + jsonutil.MarshalTimestamp(p.T, stream) + stream.WriteMore() + if p.H == nil { + jsonutil.MarshalValue(p.V, stream) + } else { + marshalHistogram(p.H, stream) + } + stream.WriteArrayEnd() +} + +func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool { + return false +} + +// marshalHistogramJSON writes something like: +// +// { +// "count": "42", +// "sum": "34593.34", +// "buckets": [ +// [ 3, "-0.25", "0.25", "3"], +// [ 0, "0.25", "0.5", "12"], +// [ 0, "0.5", "1", "21"], +// [ 0, "2", "4", "6"] +// ] +// } +// +// The 1st element in each bucket array determines if the boundaries are +// inclusive (AKA closed) or exclusive (AKA open): +// +// 0: lower exclusive, upper inclusive +// 1: lower inclusive, upper exclusive +// 2: both exclusive +// 3: both inclusive +// +// The 2nd and 3rd elements are the lower and upper boundary. The 4th element is +// the bucket count. +func marshalHistogram(h *histogram.FloatHistogram, stream *jsoniter.Stream) { + stream.WriteObjectStart() + stream.WriteObjectField(`count`) + jsonutil.MarshalValue(h.Count, stream) + stream.WriteMore() + stream.WriteObjectField(`sum`) + jsonutil.MarshalValue(h.Sum, stream) + + bucketFound := false + it := h.AllBucketIterator() + for it.Next() { + bucket := it.At() + if bucket.Count == 0 { + continue // No need to expose empty buckets in JSON. + } + stream.WriteMore() + if !bucketFound { + stream.WriteObjectField(`buckets`) + stream.WriteArrayStart() + } + bucketFound = true + boundaries := 2 // Exclusive on both sides AKA open interval. + if bucket.LowerInclusive { + if bucket.UpperInclusive { + boundaries = 3 // Inclusive on both sides AKA closed interval. + } else { + boundaries = 1 // Inclusive only on lower end AKA right open. + } + } else { + if bucket.UpperInclusive { + boundaries = 0 // Inclusive only on upper end AKA left open. 
+ } + } + stream.WriteArrayStart() + stream.WriteInt(boundaries) + stream.WriteMore() + jsonutil.MarshalValue(bucket.Lower, stream) + stream.WriteMore() + jsonutil.MarshalValue(bucket.Upper, stream) + stream.WriteMore() + jsonutil.MarshalValue(bucket.Count, stream) + stream.WriteArrayEnd() + } + if bucketFound { + stream.WriteArrayEnd() + } + stream.WriteObjectEnd() +} + +// marshalExemplarJSON writes. +// +// { +// labels: , +// value: "", +// timestamp: +// } +func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { + p := *((*exemplar.Exemplar)(ptr)) + stream.WriteObjectStart() + + // "labels" key. + stream.WriteObjectField(`labels`) + lbls, err := p.Labels.MarshalJSON() + if err != nil { + stream.Error = err + return + } + stream.SetBuffer(append(stream.Buffer(), lbls...)) + + // "value" key. + stream.WriteMore() + stream.WriteObjectField(`value`) + jsonutil.MarshalValue(p.Value, stream) + + // "timestamp" key. + stream.WriteMore() + stream.WriteObjectField(`timestamp`) + jsonutil.MarshalTimestamp(p.Ts, stream) + + stream.WriteObjectEnd() +} + +func marshalExemplarJSONEmpty(ptr unsafe.Pointer) bool { + return false +} From 857b23873f54afa642e4961071c326ea1e00eddf Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Thu, 2 Feb 2023 15:29:13 +1100 Subject: [PATCH 10/40] Expose QueryData so that implementations of Codec.CanEncode() can perform a type assertion against Response.Data. 
Signed-off-by: Charles Korn --- web/api/v1/api.go | 6 +++--- web/api/v1/api_test.go | 28 ++++++++++++++-------------- web/api/v1/json_codec_test.go | 4 ++-- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index e0ad76c5cb..e9c1182ca1 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -384,7 +384,7 @@ func (api *API) Register(r *route.Router) { r.Put("/admin/tsdb/snapshot", wrapAgent(api.snapshot)) } -type queryData struct { +type QueryData struct { ResultType parser.ValueType `json:"resultType"` Result parser.Value `json:"result"` Stats stats.QueryStats `json:"stats,omitempty"` @@ -446,7 +446,7 @@ func (api *API) query(r *http.Request) (result apiFuncResult) { } qs := sr(ctx, qry.Stats(), r.FormValue("stats")) - return apiFuncResult{&queryData{ + return apiFuncResult{&QueryData{ ResultType: res.Value.Type(), Result: res.Value, Stats: qs, @@ -537,7 +537,7 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) { } qs := sr(ctx, qry.Stats(), r.FormValue("stats")) - return apiFuncResult{&queryData{ + return apiFuncResult{&QueryData{ ResultType: res.Value.Type(), Result: res.Value, Stats: qs, diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 617a8bdf3c..455eae13f7 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -831,8 +831,8 @@ func TestStats(t *testing.T) { name: "stats is blank", param: "", expected: func(t *testing.T, i interface{}) { - require.IsType(t, i, &queryData{}) - qd := i.(*queryData) + require.IsType(t, i, &QueryData{}) + qd := i.(*QueryData) require.Nil(t, qd.Stats) }, }, @@ -840,8 +840,8 @@ func TestStats(t *testing.T) { name: "stats is true", param: "true", expected: func(t *testing.T, i interface{}) { - require.IsType(t, i, &queryData{}) - qd := i.(*queryData) + require.IsType(t, i, &QueryData{}) + qd := i.(*QueryData) require.NotNil(t, qd.Stats) qs := qd.Stats.Builtin() require.NotNil(t, qs.Timings) @@ -855,8 +855,8 @@ func TestStats(t 
*testing.T) { name: "stats is all", param: "all", expected: func(t *testing.T, i interface{}) { - require.IsType(t, i, &queryData{}) - qd := i.(*queryData) + require.IsType(t, i, &QueryData{}) + qd := i.(*QueryData) require.NotNil(t, qd.Stats) qs := qd.Stats.Builtin() require.NotNil(t, qs.Timings) @@ -876,8 +876,8 @@ func TestStats(t *testing.T) { }, param: "known", expected: func(t *testing.T, i interface{}) { - require.IsType(t, i, &queryData{}) - qd := i.(*queryData) + require.IsType(t, i, &QueryData{}) + qd := i.(*QueryData) require.NotNil(t, qd.Stats) j, err := json.Marshal(qd.Stats) require.NoError(t, err) @@ -1037,7 +1037,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "query": []string{"2"}, "time": []string{"123.4"}, }, - response: &queryData{ + response: &QueryData{ ResultType: parser.ValueTypeScalar, Result: promql.Scalar{ V: 2, @@ -1051,7 +1051,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "query": []string{"0.333"}, "time": []string{"1970-01-01T00:02:03Z"}, }, - response: &queryData{ + response: &QueryData{ ResultType: parser.ValueTypeScalar, Result: promql.Scalar{ V: 0.333, @@ -1065,7 +1065,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "query": []string{"0.333"}, "time": []string{"1970-01-01T01:02:03+01:00"}, }, - response: &queryData{ + response: &QueryData{ ResultType: parser.ValueTypeScalar, Result: promql.Scalar{ V: 0.333, @@ -1078,7 +1078,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E query: url.Values{ "query": []string{"0.333"}, }, - response: &queryData{ + response: &QueryData{ ResultType: parser.ValueTypeScalar, Result: promql.Scalar{ V: 0.333, @@ -1094,7 +1094,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "end": []string{"2"}, "step": []string{"1"}, }, - response: &queryData{ + response: &QueryData{ ResultType: parser.ValueTypeMatrix, Result: 
promql.Matrix{ promql.Series{ @@ -3180,7 +3180,7 @@ func BenchmarkRespond(b *testing.B) { for i := 0; i < 10000; i++ { points = append(points, promql.Point{V: float64(i * 1000000), T: int64(i)}) } - response := &queryData{ + response := &QueryData{ ResultType: parser.ValueTypeMatrix, Result: promql.Matrix{ promql.Series{ diff --git a/web/api/v1/json_codec_test.go b/web/api/v1/json_codec_test.go index c5b030ff9a..20b8ac3a0c 100644 --- a/web/api/v1/json_codec_test.go +++ b/web/api/v1/json_codec_test.go @@ -30,7 +30,7 @@ func TestJsonCodec_Encode(t *testing.T) { expected string }{ { - response: &queryData{ + response: &QueryData{ ResultType: parser.ValueTypeMatrix, Result: promql.Matrix{ promql.Series{ @@ -42,7 +42,7 @@ func TestJsonCodec_Encode(t *testing.T) { expected: `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"foo"},"values":[[1,"1"]]}]}}`, }, { - response: &queryData{ + response: &QueryData{ ResultType: parser.ValueTypeMatrix, Result: promql.Matrix{ promql.Series{ From deba5120ead5af493e8a4dac4eb0171596b983e3 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Sat, 11 Feb 2023 15:34:25 +0100 Subject: [PATCH 11/40] Address PR feeedback: reduce log level. 
Signed-off-by: Charles Korn --- web/api/v1/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index e9c1182ca1..6c912e9e8f 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1602,7 +1602,7 @@ func (api *API) negotiateCodec(req *http.Request, resp *Response) Codec { } } - level.Warn(api.logger).Log("msg", "could not find suitable codec for response, falling back to default codec", "accept_header", acceptHeader) + level.Debug(api.logger).Log("msg", "could not find suitable codec for response, falling back to default codec", "accept_header", acceptHeader) return defaultCodec } From 46a28899a0e86b4b073bb3172a2ac4013ba87807 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Mon, 27 Feb 2023 13:27:09 +1100 Subject: [PATCH 12/40] Implement fully-featured content negotiation for API requests, and allow overriding the default API codec. Signed-off-by: Charles Korn --- go.mod | 2 +- web/api/v1/api.go | 78 ++++++++++++++++++++-------------------- web/api/v1/api_test.go | 41 ++++++++++++++++++--- web/api/v1/codec.go | 29 ++++++++++++++- web/api/v1/codec_test.go | 68 +++++++++++++++++++++++++++++++++++ web/api/v1/json_codec.go | 4 +-- 6 files changed, 175 insertions(+), 47 deletions(-) create mode 100644 web/api/v1/codec_test.go diff --git a/go.mod b/go.mod index ac94408e4b..a12b3505d8 100644 --- a/go.mod +++ b/go.mod @@ -160,7 +160,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v1.0.0 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 10cf6885db..cfac908fe8 100644 --- 
a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -32,6 +32,7 @@ import ( "github.com/go-kit/log/level" "github.com/grafana/regexp" jsoniter "github.com/json-iterator/go" + "github.com/munnerz/goautoneg" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -68,20 +69,19 @@ const ( type errorType string const ( - errorNone errorType = "" - errorTimeout errorType = "timeout" - errorCanceled errorType = "canceled" - errorExec errorType = "execution" - errorBadData errorType = "bad_data" - errorInternal errorType = "internal" - errorUnavailable errorType = "unavailable" - errorNotFound errorType = "not_found" + errorNone errorType = "" + errorTimeout errorType = "timeout" + errorCanceled errorType = "canceled" + errorExec errorType = "execution" + errorBadData errorType = "bad_data" + errorInternal errorType = "internal" + errorUnavailable errorType = "unavailable" + errorNotFound errorType = "not_found" + errorNotAcceptable errorType = "not_acceptable" ) var LocalhostRepresentations = []string{"127.0.0.1", "localhost", "::1"} -var defaultCodec = JSONCodec{} - type apiError struct { typ errorType err error @@ -212,7 +212,7 @@ type API struct { remoteWriteHandler http.Handler remoteReadHandler http.Handler - codecs map[string]Codec + codecs []Codec } // NewAPI returns an initialized API type. @@ -271,11 +271,9 @@ func NewAPI( statsRenderer: defaultStatsRenderer, remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame), - - codecs: map[string]Codec{}, } - a.InstallCodec(defaultCodec) + a.InstallCodec(JSONCodec{}) if statsRenderer != nil { a.statsRenderer = statsRenderer @@ -289,13 +287,15 @@ func NewAPI( } // InstallCodec adds codec to this API's available codecs. -// If codec handles a content type handled by a codec already installed in this API, codec replaces the previous codec. 
+// Codecs installed first take precedence over codecs installed later when evaluating wildcards in Accept headers. +// The first installed codec is used as a fallback when the Accept header cannot be satisfied or if there is no Accept header. func (api *API) InstallCodec(codec Codec) { - if api.codecs == nil { - api.codecs = map[string]Codec{} - } + api.codecs = append(api.codecs, codec) +} - api.codecs[codec.ContentType()] = codec +// ClearCodecs removes all available codecs from this API, including the default codec installed by NewAPI. +func (api *API) ClearCodecs() { + api.codecs = nil } func setUnavailStatusOnTSDBNotReady(r apiFuncResult) apiFuncResult { @@ -1583,7 +1583,12 @@ func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface Warnings: warningStrings, } - codec := api.negotiateCodec(req, resp) + codec, err := api.negotiateCodec(req, resp) + if err != nil { + api.respondError(w, &apiError{errorNotAcceptable, err}, nil) + return + } + b, err := codec.Encode(resp) if err != nil { level.Error(api.logger).Log("msg", "error marshaling response", "err", err) @@ -1591,33 +1596,28 @@ func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface return } - w.Header().Set("Content-Type", codec.ContentType()) + w.Header().Set("Content-Type", codec.ContentType().String()) w.WriteHeader(http.StatusOK) if n, err := w.Write(b); err != nil { level.Error(api.logger).Log("msg", "error writing response", "bytesWritten", n, "err", err) } } -// HTTP content negotiation is hard (see https://developer.mozilla.org/en-US/docs/Web/HTTP/Content_negotiation). -// Ideally, we shouldn't be implementing this ourselves - https://github.com/golang/go/issues/19307 is an open proposal to add -// this to the Go stdlib and has links to a number of other implementations. -// -// This is an MVP, and doesn't support features like wildcards or weighting. 
-func (api *API) negotiateCodec(req *http.Request, resp *Response) Codec { - acceptHeader := req.Header.Get("Accept") - if acceptHeader == "" { - return defaultCodec - } - - for _, contentType := range strings.Split(acceptHeader, ",") { - codec, ok := api.codecs[strings.TrimSpace(contentType)] - if ok && codec.CanEncode(resp) { - return codec +func (api *API) negotiateCodec(req *http.Request, resp *Response) (Codec, error) { + for _, clause := range goautoneg.ParseAccept(req.Header.Get("Accept")) { + for _, codec := range api.codecs { + if codec.ContentType().Satisfies(clause) && codec.CanEncode(resp) { + return codec, nil + } } } - level.Debug(api.logger).Log("msg", "could not find suitable codec for response, falling back to default codec", "accept_header", acceptHeader) - return defaultCodec + defaultCodec := api.codecs[0] + if !defaultCodec.CanEncode(resp) { + return nil, fmt.Errorf("cannot encode response as %s", defaultCodec.ContentType()) + } + + return defaultCodec, nil } func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data interface{}) { @@ -1648,6 +1648,8 @@ func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data inter code = http.StatusInternalServerError case errorNotFound: code = http.StatusNotFound + case errorNotAcceptable: + code = http.StatusNotAcceptable default: code = http.StatusInternalServerError } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 0531c4fe53..90cf084ac0 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -2769,9 +2769,11 @@ func TestRespondSuccess(t *testing.T) { logger: log.NewNopLogger(), } - api.InstallCodec(&testCodec{contentType: "test/cannot-encode", canEncode: false}) - api.InstallCodec(&testCodec{contentType: "test/can-encode", canEncode: true}) - api.InstallCodec(&testCodec{contentType: "test/can-encode-2", canEncode: true}) + api.ClearCodecs() + api.InstallCodec(JSONCodec{}) + api.InstallCodec(&testCodec{contentType: MIMEType{"test", 
"cannot-encode"}, canEncode: false}) + api.InstallCodec(&testCodec{contentType: MIMEType{"test", "can-encode"}, canEncode: true}) + api.InstallCodec(&testCodec{contentType: MIMEType{"test", "can-encode-2"}, canEncode: true}) s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { api.respond(w, r, "test", nil) @@ -2854,6 +2856,34 @@ func TestRespondSuccess(t *testing.T) { } } +func TestRespondSuccess_DefaultCodecCannotEncodeResponse(t *testing.T) { + api := API{ + logger: log.NewNopLogger(), + } + + api.ClearCodecs() + api.InstallCodec(&testCodec{contentType: MIMEType{"application", "default-format"}, canEncode: false}) + + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + api.respond(w, r, "test", nil) + })) + defer s.Close() + + req, err := http.NewRequest(http.MethodGet, s.URL, nil) + require.NoError(t, err) + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + + body, err := io.ReadAll(resp.Body) + defer resp.Body.Close() + require.NoError(t, err) + + require.Equal(t, http.StatusNotAcceptable, resp.StatusCode) + require.Equal(t, "application/json", resp.Header.Get("Content-Type")) + require.Equal(t, `{"status":"error","errorType":"not_acceptable","error":"cannot encode response as application/default-format"}`, string(body)) +} + func TestRespondError(t *testing.T) { s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { api := API{} @@ -3193,6 +3223,7 @@ func BenchmarkRespond(b *testing.B) { } b.ResetTimer() api := API{} + api.InstallCodec(JSONCodec{}) for n := 0; n < b.N; n++ { api.respond(&testResponseWriter, request, response, nil) } @@ -3307,11 +3338,11 @@ func TestGetGlobalURL(t *testing.T) { } type testCodec struct { - contentType string + contentType MIMEType canEncode bool } -func (t *testCodec) ContentType() string { +func (t *testCodec) ContentType() MIMEType { return t.contentType } diff --git a/web/api/v1/codec.go 
b/web/api/v1/codec.go index d11bb1fa01..492e00a74a 100644 --- a/web/api/v1/codec.go +++ b/web/api/v1/codec.go @@ -13,10 +13,12 @@ package v1 +import "github.com/munnerz/goautoneg" + // A Codec performs encoding of API responses. type Codec interface { // ContentType returns the MIME time that this Codec emits. - ContentType() string + ContentType() MIMEType // CanEncode determines if this Codec can encode resp. CanEncode(resp *Response) bool @@ -24,3 +26,28 @@ type Codec interface { // Encode encodes resp, ready for transmission to an API consumer. Encode(resp *Response) ([]byte, error) } + +type MIMEType struct { + Type string + SubType string +} + +func (m MIMEType) String() string { + return m.Type + "/" + m.SubType +} + +func (m MIMEType) Satisfies(accept goautoneg.Accept) bool { + if accept.Type == "*" && accept.SubType == "*" { + return true + } + + if accept.Type == m.Type && accept.SubType == "*" { + return true + } + + if accept.Type == m.Type && accept.SubType == m.SubType { + return true + } + + return false +} diff --git a/web/api/v1/codec_test.go b/web/api/v1/codec_test.go new file mode 100644 index 0000000000..911bf206e3 --- /dev/null +++ b/web/api/v1/codec_test.go @@ -0,0 +1,68 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1 + +import ( + "testing" + + "github.com/munnerz/goautoneg" + "github.com/stretchr/testify/require" +) + +func TestMIMEType_String(t *testing.T) { + m := MIMEType{Type: "application", SubType: "json"} + + require.Equal(t, "application/json", m.String()) +} + +func TestMIMEType_Satisfies(t *testing.T) { + m := MIMEType{Type: "application", SubType: "json"} + + scenarios := map[string]struct { + accept goautoneg.Accept + expected bool + }{ + "exact match": { + accept: goautoneg.Accept{Type: "application", SubType: "json"}, + expected: true, + }, + "sub-type wildcard match": { + accept: goautoneg.Accept{Type: "application", SubType: "*"}, + expected: true, + }, + "full wildcard match": { + accept: goautoneg.Accept{Type: "*", SubType: "*"}, + expected: true, + }, + "inverted": { + accept: goautoneg.Accept{Type: "json", SubType: "application"}, + expected: false, + }, + "inverted sub-type wildcard": { + accept: goautoneg.Accept{Type: "json", SubType: "*"}, + expected: false, + }, + "complete mismatch": { + accept: goautoneg.Accept{Type: "text", SubType: "plain"}, + expected: false, + }, + } + + for name, scenario := range scenarios { + t.Run(name, func(t *testing.T) { + actual := m.Satisfies(scenario.accept) + require.Equal(t, scenario.expected, actual) + }) + } +} diff --git a/web/api/v1/json_codec.go b/web/api/v1/json_codec.go index 455af717d0..79ebfee182 100644 --- a/web/api/v1/json_codec.go +++ b/web/api/v1/json_codec.go @@ -34,8 +34,8 @@ func init() { // JSONCodec is a Codec that encodes API responses as JSON. type JSONCodec struct{} -func (j JSONCodec) ContentType() string { - return "application/json" +func (j JSONCodec) ContentType() MIMEType { + return MIMEType{Type: "application", SubType: "json"} } func (j JSONCodec) CanEncode(_ *Response) bool { From 79e4bdee8e0ea7219357bdefdbab62446a091855 Mon Sep 17 00:00:00 2001 From: ianwoolf Date: Sun, 27 Feb 2022 20:49:33 +0800 Subject: [PATCH 13/40] add Close for ActiveQueryTracker to close the file. 
Signed-off-by: ianwoolf --- promql/engine_test.go | 1 + promql/query_logger.go | 22 ++++++++++++++++------ promql/query_logger_test.go | 5 ++++- 3 files changed, 21 insertions(+), 7 deletions(-) diff --git a/promql/engine_test.go b/promql/engine_test.go index b64e32ba46..237ff7239e 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -49,6 +49,7 @@ func TestQueryConcurrency(t *testing.T) { require.NoError(t, err) defer os.RemoveAll(dir) queryTracker := NewActiveQueryTracker(dir, maxConcurrency, nil) + t.Cleanup(queryTracker.Close) opts := EngineOpts{ Logger: nil, diff --git a/promql/query_logger.go b/promql/query_logger.go index 716e7749b9..fa4e1fb079 100644 --- a/promql/query_logger.go +++ b/promql/query_logger.go @@ -16,6 +16,7 @@ package promql import ( "context" "encoding/json" + "io" "os" "path/filepath" "strings" @@ -31,6 +32,7 @@ type ActiveQueryTracker struct { mmapedFile []byte getNextIndex chan int logger log.Logger + closer io.Closer maxConcurrent int } @@ -81,7 +83,7 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { } } -func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, error) { +func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io.Closer, error) { file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666) if err != nil { absPath, pathErr := filepath.Abs(filename) @@ -89,22 +91,22 @@ func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, er absPath = filename } level.Error(logger).Log("msg", "Error opening query log file", "file", absPath, "err", err) - return nil, err + return nil, nil, err } err = file.Truncate(int64(filesize)) if err != nil { level.Error(logger).Log("msg", "Error setting filesize.", "filesize", filesize, "err", err) - return nil, err + return nil, nil, err } fileAsBytes, err := mmap.Map(file, mmap.RDWR, 0) if err != nil { level.Error(logger).Log("msg", "Failed to mmap", "file", filename, 
"Attempted size", filesize, "err", err) - return nil, err + return nil, nil, err } - return fileAsBytes, err + return fileAsBytes, file, err } func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker { @@ -116,7 +118,7 @@ func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger lo filename, filesize := filepath.Join(localStoragePath, "queries.active"), 1+maxConcurrent*entrySize logUnfinishedQueries(filename, filesize, logger) - fileAsBytes, err := getMMapedFile(filename, filesize, logger) + fileAsBytes, closer, err := getMMapedFile(filename, filesize, logger) if err != nil { panic("Unable to create mmap-ed active query log") } @@ -124,6 +126,7 @@ func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger lo copy(fileAsBytes, "[") activeQueryTracker := ActiveQueryTracker{ mmapedFile: fileAsBytes, + closer: closer, getNextIndex: make(chan int, maxConcurrent), logger: logger, maxConcurrent: maxConcurrent, @@ -198,3 +201,10 @@ func (tracker ActiveQueryTracker) Insert(ctx context.Context, query string) (int return 0, ctx.Err() } } + +func (tracker *ActiveQueryTracker) Close() { + if tracker == nil || tracker.closer == nil { + return + } + tracker.closer.Close() +} diff --git a/promql/query_logger_test.go b/promql/query_logger_test.go index ad76fb9929..ce55fecbbb 100644 --- a/promql/query_logger_test.go +++ b/promql/query_logger_test.go @@ -110,7 +110,10 @@ func TestMMapFile(t *testing.T) { filename := file.Name() defer os.Remove(filename) - fileAsBytes, err := getMMapedFile(filename, 2, nil) + fileAsBytes, closer, err := getMMapedFile(filename, 2, nil) + if err != nil { + t.Cleanup(func() { closer.Close() }) + } require.NoError(t, err) copy(fileAsBytes, "ab") From 866fa25b2025a81b31d80ab04aacfb07196cca80 Mon Sep 17 00:00:00 2001 From: "renzheng.wang" Date: Tue, 30 May 2023 20:13:00 +0800 Subject: [PATCH 14/40] add label and labelpresent for endpointslice role in k8s discovery 
Signed-off-by: renzheng.wang --- discovery/kubernetes/endpoints.go | 24 +--- discovery/kubernetes/endpointslice.go | 4 +- discovery/kubernetes/endpointslice_adaptor.go | 10 ++ discovery/kubernetes/endpointslice_test.go | 122 +++++++++++------- discovery/kubernetes/ingress.go | 28 +--- discovery/kubernetes/ingress_adaptor.go | 25 ++-- discovery/kubernetes/kubernetes.go | 17 +++ discovery/kubernetes/node.go | 23 +--- discovery/kubernetes/pod.go | 21 +-- discovery/kubernetes/service.go | 18 +-- 10 files changed, 134 insertions(+), 158 deletions(-) diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index 2413dab455..e0d5536d1e 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -29,7 +29,6 @@ import ( "k8s.io/client-go/util/workqueue" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/util/strutil" ) var ( @@ -248,9 +247,6 @@ func endpointsSourceFromNamespaceAndName(namespace, name string) string { } const ( - endpointsLabelPrefix = metaLabelPrefix + "endpoints_label_" - endpointsLabelPresentPrefix = metaLabelPrefix + "endpoints_labelpresent_" - endpointsNameLabel = metaLabelPrefix + "endpoints_name" endpointNodeName = metaLabelPrefix + "endpoint_node_name" endpointHostname = metaLabelPrefix + "endpoint_hostname" endpointReadyLabel = metaLabelPrefix + "endpoint_ready" @@ -265,16 +261,11 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { Source: endpointsSource(eps), } tg.Labels = model.LabelSet{ - namespaceLabel: lv(eps.Namespace), - endpointsNameLabel: lv(eps.Name), + namespaceLabel: lv(eps.Namespace), } e.addServiceLabels(eps.Namespace, eps.Name, tg) // Add endpoints labels metadata. 
- for k, v := range eps.Labels { - ln := strutil.SanitizeLabelName(k) - tg.Labels[model.LabelName(endpointsLabelPrefix+ln)] = lv(v) - tg.Labels[model.LabelName(endpointsLabelPresentPrefix+ln)] = presentValue - } + addObjectMetaLabels(tg.Labels, eps.ObjectMeta, RoleEndpoint) type podEntry struct { pod *apiv1.Pod @@ -462,14 +453,7 @@ func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger log.L node := obj.(*apiv1.Node) // Allocate one target label for the node name, - // and two target labels for each node label. - nodeLabelset := make(model.LabelSet, 1+2*len(node.GetLabels())) - nodeLabelset[nodeNameLabel] = lv(*nodeName) - for k, v := range node.GetLabels() { - ln := strutil.SanitizeLabelName(k) - nodeLabelset[model.LabelName(nodeLabelPrefix+ln)] = lv(v) - nodeLabelset[model.LabelName(nodeLabelPresentPrefix+ln)] = presentValue - } - + nodeLabelset := make(model.LabelSet) + addObjectMetaLabels(nodeLabelset, node.ObjectMeta, RoleNode) return tg.Merge(nodeLabelset) } diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index c7df642525..ed23a95b33 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -274,9 +274,11 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou } tg.Labels = model.LabelSet{ namespaceLabel: lv(eps.namespace()), - endpointSliceNameLabel: lv(eps.name()), endpointSliceAddressTypeLabel: lv(eps.addressType()), } + + addObjectMetaLabels(tg.Labels, eps.getObjectMeta(), RoleEndpointSlice) + e.addServiceLabels(eps, tg) type podEntry struct { diff --git a/discovery/kubernetes/endpointslice_adaptor.go b/discovery/kubernetes/endpointslice_adaptor.go index 5a21f1b899..46fa708c10 100644 --- a/discovery/kubernetes/endpointslice_adaptor.go +++ b/discovery/kubernetes/endpointslice_adaptor.go @@ -17,11 +17,13 @@ import ( corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/discovery/v1" "k8s.io/api/discovery/v1beta1" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" ) // endpointSliceAdaptor is an adaptor for the different EndpointSlice versions type endpointSliceAdaptor interface { get() interface{} + getObjectMeta() metav1.ObjectMeta name() string namespace() string addressType() string @@ -66,6 +68,10 @@ func (e *endpointSliceAdaptorV1) get() interface{} { return e.endpointSlice } +func (e *endpointSliceAdaptorV1) getObjectMeta() metav1.ObjectMeta { + return e.endpointSlice.ObjectMeta +} + func (e *endpointSliceAdaptorV1) name() string { return e.endpointSlice.ObjectMeta.Name } @@ -115,6 +121,10 @@ func (e *endpointSliceAdaptorV1Beta1) get() interface{} { return e.endpointSlice } +func (e *endpointSliceAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta { + return e.endpointSlice.ObjectMeta +} + func (e *endpointSliceAdaptorV1Beta1) name() string { return e.endpointSlice.Name } diff --git a/discovery/kubernetes/endpointslice_test.go b/discovery/kubernetes/endpointslice_test.go index 8104e3db3b..42e64a0567 100644 --- a/discovery/kubernetes/endpointslice_test.go +++ b/discovery/kubernetes/endpointslice_test.go @@ -219,9 +219,11 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", }, Source: "endpointslice/default/testendpoints", }, @@ -280,9 +282,11 @@ func TestEndpointSliceDiscoveryBeforeRunV1beta1(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_namespace": "default", - 
"__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", }, Source: "endpointslice/default/testendpoints", }, @@ -478,9 +482,11 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) { }, }, Labels: map[model.LabelName]model.LabelValue{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_namespace": "default", }, }, }, @@ -574,9 +580,11 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_namespace": "default", }, }, }, @@ -659,9 +667,11 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_namespace": "default", + 
"__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_namespace": "default", }, Source: "endpointslice/default/testendpoints", }, @@ -739,12 +749,14 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_service_label_app_name": "test", - "__meta_kubernetes_service_labelpresent_app_name": "true", - "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_service_label_app_name": "test", + "__meta_kubernetes_service_labelpresent_app_name": "true", + "__meta_kubernetes_service_name": "testendpoints", }, Source: "endpointslice/default/testendpoints", }, @@ -835,14 +847,16 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_service_label_app_name": "svc", - "__meta_kubernetes_service_label_component": "testing", - "__meta_kubernetes_service_labelpresent_app_name": "true", - "__meta_kubernetes_service_labelpresent_component": "true", - "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_address_type": 
"IPv4", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_service_label_app_name": "svc", + "__meta_kubernetes_service_label_component": "testing", + "__meta_kubernetes_service_labelpresent_app_name": "true", + "__meta_kubernetes_service_labelpresent_component": "true", + "__meta_kubernetes_service_name": "testendpoints", }, Source: "endpointslice/default/testendpoints", }, @@ -927,12 +941,14 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_service_label_app_name": "test", - "__meta_kubernetes_service_labelpresent_app_name": "true", - "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_service_label_app_name": "test", + "__meta_kubernetes_service_labelpresent_app_name": "true", + "__meta_kubernetes_service_name": "testendpoints", }, Source: "endpointslice/default/testendpoints", }, @@ -1023,12 +1039,14 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_service_label_app_name": "test", - 
"__meta_kubernetes_service_labelpresent_app_name": "true", - "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_service_label_app_name": "test", + "__meta_kubernetes_service_labelpresent_app_name": "true", + "__meta_kubernetes_service_name": "testendpoints", }, Source: "endpointslice/default/testendpoints", }, @@ -1159,12 +1177,14 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_namespace": "ns1", - "__meta_kubernetes_service_label_app": "app1", - "__meta_kubernetes_service_labelpresent_app": "true", - "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_namespace": "ns1", + "__meta_kubernetes_service_label_app": "app1", + "__meta_kubernetes_service_labelpresent_app": "true", + "__meta_kubernetes_service_name": "testendpoints", }, Source: "endpointslice/ns1/testendpoints", }, @@ -1303,9 +1323,11 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_namespace": "own-ns", + "__meta_kubernetes_endpointslice_address_type": "IPv4", + 
"__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_namespace": "own-ns", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", }, Source: "endpointslice/own-ns/testendpoints", }, diff --git a/discovery/kubernetes/ingress.go b/discovery/kubernetes/ingress.go index ad47c341a5..697b6f5198 100644 --- a/discovery/kubernetes/ingress.go +++ b/discovery/kubernetes/ingress.go @@ -28,7 +28,6 @@ import ( "k8s.io/client-go/util/workqueue" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/util/strutil" ) var ( @@ -143,37 +142,22 @@ func ingressSourceFromNamespaceAndName(namespace, name string) string { } const ( - ingressNameLabel = metaLabelPrefix + "ingress_name" - ingressLabelPrefix = metaLabelPrefix + "ingress_label_" - ingressLabelPresentPrefix = metaLabelPrefix + "ingress_labelpresent_" - ingressAnnotationPrefix = metaLabelPrefix + "ingress_annotation_" - ingressAnnotationPresentPrefix = metaLabelPrefix + "ingress_annotationpresent_" - ingressSchemeLabel = metaLabelPrefix + "ingress_scheme" - ingressHostLabel = metaLabelPrefix + "ingress_host" - ingressPathLabel = metaLabelPrefix + "ingress_path" - ingressClassNameLabel = metaLabelPrefix + "ingress_class_name" + ingressSchemeLabel = metaLabelPrefix + "ingress_scheme" + ingressHostLabel = metaLabelPrefix + "ingress_host" + ingressPathLabel = metaLabelPrefix + "ingress_path" + ingressClassNameLabel = metaLabelPrefix + "ingress_class_name" ) func ingressLabels(ingress ingressAdaptor) model.LabelSet { // Each label and annotation will create two key-value pairs in the map. 
- ls := make(model.LabelSet, 2*(len(ingress.labels())+len(ingress.annotations()))+2) - ls[ingressNameLabel] = lv(ingress.name()) + ls := make(model.LabelSet) ls[namespaceLabel] = lv(ingress.namespace()) if cls := ingress.ingressClassName(); cls != nil { ls[ingressClassNameLabel] = lv(*cls) } - for k, v := range ingress.labels() { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(ingressLabelPrefix+ln)] = lv(v) - ls[model.LabelName(ingressLabelPresentPrefix+ln)] = presentValue - } + addObjectMetaLabels(ls, ingress.getObjectMeta(), RoleIngress) - for k, v := range ingress.annotations() { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(ingressAnnotationPrefix+ln)] = lv(v) - ls[model.LabelName(ingressAnnotationPresentPrefix+ln)] = presentValue - } return ls } diff --git a/discovery/kubernetes/ingress_adaptor.go b/discovery/kubernetes/ingress_adaptor.go index 113a067ca0..7be8538b53 100644 --- a/discovery/kubernetes/ingress_adaptor.go +++ b/discovery/kubernetes/ingress_adaptor.go @@ -16,10 +16,12 @@ package kubernetes import ( v1 "k8s.io/api/networking/v1" "k8s.io/api/networking/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // ingressAdaptor is an adaptor for the different Ingress versions type ingressAdaptor interface { + getObjectMeta() metav1.ObjectMeta name() string namespace() string labels() map[string]string @@ -43,11 +45,12 @@ func newIngressAdaptorFromV1(ingress *v1.Ingress) ingressAdaptor { return &ingressAdaptorV1{ingress: ingress} } -func (i *ingressAdaptorV1) name() string { return i.ingress.Name } -func (i *ingressAdaptorV1) namespace() string { return i.ingress.Namespace } -func (i *ingressAdaptorV1) labels() map[string]string { return i.ingress.Labels } -func (i *ingressAdaptorV1) annotations() map[string]string { return i.ingress.Annotations } -func (i *ingressAdaptorV1) ingressClassName() *string { return i.ingress.Spec.IngressClassName } +func (i *ingressAdaptorV1) getObjectMeta() metav1.ObjectMeta { return 
i.ingress.ObjectMeta } +func (i *ingressAdaptorV1) name() string { return i.ingress.Name } +func (i *ingressAdaptorV1) namespace() string { return i.ingress.Namespace } +func (i *ingressAdaptorV1) labels() map[string]string { return i.ingress.Labels } +func (i *ingressAdaptorV1) annotations() map[string]string { return i.ingress.Annotations } +func (i *ingressAdaptorV1) ingressClassName() *string { return i.ingress.Spec.IngressClassName } func (i *ingressAdaptorV1) tlsHosts() []string { var hosts []string @@ -95,12 +98,12 @@ type ingressAdaptorV1Beta1 struct { func newIngressAdaptorFromV1beta1(ingress *v1beta1.Ingress) ingressAdaptor { return &ingressAdaptorV1Beta1{ingress: ingress} } - -func (i *ingressAdaptorV1Beta1) name() string { return i.ingress.Name } -func (i *ingressAdaptorV1Beta1) namespace() string { return i.ingress.Namespace } -func (i *ingressAdaptorV1Beta1) labels() map[string]string { return i.ingress.Labels } -func (i *ingressAdaptorV1Beta1) annotations() map[string]string { return i.ingress.Annotations } -func (i *ingressAdaptorV1Beta1) ingressClassName() *string { return i.ingress.Spec.IngressClassName } +func (i *ingressAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta { return i.ingress.ObjectMeta } +func (i *ingressAdaptorV1Beta1) name() string { return i.ingress.Name } +func (i *ingressAdaptorV1Beta1) namespace() string { return i.ingress.Namespace } +func (i *ingressAdaptorV1Beta1) labels() map[string]string { return i.ingress.Labels } +func (i *ingressAdaptorV1Beta1) annotations() map[string]string { return i.ingress.Annotations } +func (i *ingressAdaptorV1Beta1) ingressClassName() *string { return i.ingress.Spec.IngressClassName } func (i *ingressAdaptorV1Beta1) tlsHosts() []string { var hosts []string diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go index e87a1c9b24..d45ac41934 100644 --- a/discovery/kubernetes/kubernetes.go +++ b/discovery/kubernetes/kubernetes.go @@ -17,6 +17,7 @@ import ( "context" 
"errors" "fmt" + "github.com/prometheus/prometheus/util/strutil" "os" "reflect" "strings" @@ -843,3 +844,19 @@ func checkDiscoveryV1Supported(client kubernetes.Interface) (bool, error) { // https://kubernetes.io/docs/reference/using-api/deprecation-guide/#v1-25 return semVer.Major() >= 1 && semVer.Minor() >= 21, nil } + +func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta, role Role) { + labelSet[model.LabelName(metaLabelPrefix+string(role)+"_name")] = lv(objectMeta.Name) + + for k, v := range objectMeta.Labels { + ln := strutil.SanitizeLabelName(k) + labelSet[model.LabelName(metaLabelPrefix+string(role)+"_label_"+ln)] = lv(v) + labelSet[model.LabelName(metaLabelPrefix+string(role)+"_labelpresent_"+ln)] = presentValue + } + + for k, v := range objectMeta.Annotations { + ln := strutil.SanitizeLabelName(k) + labelSet[model.LabelName(metaLabelPrefix+string(role)+"_annotation_"+ln)] = lv(v) + labelSet[model.LabelName(metaLabelPrefix+string(role)+"_annotationpresent_"+ln)] = presentValue + } +} diff --git a/discovery/kubernetes/node.go b/discovery/kubernetes/node.go index d0a6d2780d..6a20e7b1f2 100644 --- a/discovery/kubernetes/node.go +++ b/discovery/kubernetes/node.go @@ -152,33 +152,18 @@ func nodeSourceFromName(name string) string { } const ( - nodeNameLabel = metaLabelPrefix + "node_name" - nodeProviderIDLabel = metaLabelPrefix + "node_provider_id" - nodeLabelPrefix = metaLabelPrefix + "node_label_" - nodeLabelPresentPrefix = metaLabelPrefix + "node_labelpresent_" - nodeAnnotationPrefix = metaLabelPrefix + "node_annotation_" - nodeAnnotationPresentPrefix = metaLabelPrefix + "node_annotationpresent_" - nodeAddressPrefix = metaLabelPrefix + "node_address_" + nodeProviderIDLabel = metaLabelPrefix + "node_provider_id" + nodeAddressPrefix = metaLabelPrefix + "node_address_" ) func nodeLabels(n *apiv1.Node) model.LabelSet { // Each label and annotation will create two key-value pairs in the map. 
- ls := make(model.LabelSet, 2*(len(n.Labels)+len(n.Annotations))+1) + ls := make(model.LabelSet) - ls[nodeNameLabel] = lv(n.Name) ls[nodeProviderIDLabel] = lv(n.Spec.ProviderID) - for k, v := range n.Labels { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(nodeLabelPrefix+ln)] = lv(v) - ls[model.LabelName(nodeLabelPresentPrefix+ln)] = presentValue - } + addObjectMetaLabels(ls, n.ObjectMeta, RoleNode) - for k, v := range n.Annotations { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(nodeAnnotationPrefix+ln)] = lv(v) - ls[model.LabelName(nodeAnnotationPresentPrefix+ln)] = presentValue - } return ls } diff --git a/discovery/kubernetes/pod.go b/discovery/kubernetes/pod.go index 732cf52ad9..74f74c1f75 100644 --- a/discovery/kubernetes/pod.go +++ b/discovery/kubernetes/pod.go @@ -30,7 +30,6 @@ import ( "k8s.io/client-go/util/workqueue" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/util/strutil" ) const nodeIndex = "node" @@ -180,7 +179,6 @@ func convertToPod(o interface{}) (*apiv1.Pod, error) { } const ( - podNameLabel = metaLabelPrefix + "pod_name" podIPLabel = metaLabelPrefix + "pod_ip" podContainerNameLabel = metaLabelPrefix + "pod_container_name" podContainerIDLabel = metaLabelPrefix + "pod_container_id" @@ -191,10 +189,6 @@ const ( podContainerIsInit = metaLabelPrefix + "pod_container_init" podReadyLabel = metaLabelPrefix + "pod_ready" podPhaseLabel = metaLabelPrefix + "pod_phase" - podLabelPrefix = metaLabelPrefix + "pod_label_" - podLabelPresentPrefix = metaLabelPrefix + "pod_labelpresent_" - podAnnotationPrefix = metaLabelPrefix + "pod_annotation_" - podAnnotationPresentPrefix = metaLabelPrefix + "pod_annotationpresent_" podNodeNameLabel = metaLabelPrefix + "pod_node_name" podHostIPLabel = metaLabelPrefix + "pod_host_ip" podUID = metaLabelPrefix + "pod_uid" @@ -215,7 +209,6 @@ func GetControllerOf(controllee metav1.Object) *metav1.OwnerReference { func podLabels(pod *apiv1.Pod) model.LabelSet { 
ls := model.LabelSet{ - podNameLabel: lv(pod.ObjectMeta.Name), podIPLabel: lv(pod.Status.PodIP), podReadyLabel: podReady(pod), podPhaseLabel: lv(string(pod.Status.Phase)), @@ -224,6 +217,8 @@ func podLabels(pod *apiv1.Pod) model.LabelSet { podUID: lv(string(pod.ObjectMeta.UID)), } + addObjectMetaLabels(ls, pod.ObjectMeta, RolePod) + createdBy := GetControllerOf(pod) if createdBy != nil { if createdBy.Kind != "" { @@ -234,18 +229,6 @@ func podLabels(pod *apiv1.Pod) model.LabelSet { } } - for k, v := range pod.Labels { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(podLabelPrefix+ln)] = lv(v) - ls[model.LabelName(podLabelPresentPrefix+ln)] = presentValue - } - - for k, v := range pod.Annotations { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(podAnnotationPrefix+ln)] = lv(v) - ls[model.LabelName(podAnnotationPresentPrefix+ln)] = presentValue - } - return ls } diff --git a/discovery/kubernetes/service.go b/discovery/kubernetes/service.go index 40e17679ee..96cac33659 100644 --- a/discovery/kubernetes/service.go +++ b/discovery/kubernetes/service.go @@ -28,7 +28,6 @@ import ( "k8s.io/client-go/util/workqueue" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/util/strutil" ) var ( @@ -162,23 +161,10 @@ const ( ) func serviceLabels(svc *apiv1.Service) model.LabelSet { - // Each label and annotation will create two key-value pairs in the map. 
- ls := make(model.LabelSet, 2*(len(svc.Labels)+len(svc.Annotations))+2) - - ls[serviceNameLabel] = lv(svc.Name) + ls := make(model.LabelSet) ls[namespaceLabel] = lv(svc.Namespace) + addObjectMetaLabels(ls, svc.ObjectMeta, RoleService) - for k, v := range svc.Labels { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(serviceLabelPrefix+ln)] = lv(v) - ls[model.LabelName(serviceLabelPresentPrefix+ln)] = presentValue - } - - for k, v := range svc.Annotations { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(serviceAnnotationPrefix+ln)] = lv(v) - ls[model.LabelName(serviceAnnotationPresentPrefix+ln)] = presentValue - } return ls } From 98ffad01b80023d923bcab3d151870d53fd3a422 Mon Sep 17 00:00:00 2001 From: "renzheng.wang" Date: Sat, 25 Jun 2022 21:58:44 +0800 Subject: [PATCH 15/40] update tests and docs Signed-off-by: renzheng.wang --- discovery/kubernetes/endpoints_test.go | 79 +++++++++++++--------- discovery/kubernetes/endpointslice_test.go | 28 ++++++++ docs/configuration/configuration.md | 6 ++ 3 files changed, 82 insertions(+), 31 deletions(-) diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go index 5aa58bdc49..346bf61330 100644 --- a/discovery/kubernetes/endpoints_test.go +++ b/discovery/kubernetes/endpoints_test.go @@ -32,6 +32,9 @@ func makeEndpoints() *v1.Endpoints { ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: "default", + Annotations: map[string]string{ + "test.annotation": "test", + }, }, Subsets: []v1.EndpointSubset{ { @@ -134,8 +137,10 @@ func TestEndpointsDiscoveryBeforeRun(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_endpoints_annotation_test_annotation": "test", + "__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true", }, Source: 
"endpoints/default/testendpoints", }, @@ -434,11 +439,13 @@ func TestEndpointsDiscoveryWithService(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_endpoints_name": "testendpoints", - "__meta_kubernetes_service_label_app_name": "test", - "__meta_kubernetes_service_labelpresent_app_name": "true", - "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_service_label_app_name": "test", + "__meta_kubernetes_service_labelpresent_app_name": "true", + "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_endpoints_annotation_test_annotation": "test", + "__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true", }, Source: "endpoints/default/testendpoints", }, @@ -510,13 +517,15 @@ func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_endpoints_name": "testendpoints", - "__meta_kubernetes_service_label_app_name": "svc", - "__meta_kubernetes_service_labelpresent_app_name": "true", - "__meta_kubernetes_service_name": "testendpoints", - "__meta_kubernetes_service_label_component": "testing", - "__meta_kubernetes_service_labelpresent_component": "true", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_service_label_app_name": "svc", + "__meta_kubernetes_service_labelpresent_app_name": "true", + "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_service_label_component": "testing", + "__meta_kubernetes_service_labelpresent_component": "true", + "__meta_kubernetes_endpoints_annotation_test_annotation": "test", + "__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true", }, Source: "endpoints/default/testendpoints", }, @@ -583,11 +592,13 @@ func 
TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_endpoints_name": "testendpoints", - "__meta_kubernetes_service_label_app_name": "test", - "__meta_kubernetes_service_labelpresent_app_name": "true", - "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_service_label_app_name": "test", + "__meta_kubernetes_service_labelpresent_app_name": "true", + "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_endpoints_annotation_test_annotation": "test", + "__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true", }, Source: "endpoints/default/testendpoints", }, @@ -658,11 +669,13 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_endpoints_name": "testendpoints", - "__meta_kubernetes_service_label_app_name": "test", - "__meta_kubernetes_service_labelpresent_app_name": "true", - "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_service_label_app_name": "test", + "__meta_kubernetes_service_labelpresent_app_name": "true", + "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_endpoints_annotation_test_annotation": "test", + "__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true", }, Source: "endpoints/default/testendpoints", }, @@ -777,11 +790,13 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_namespace": "ns1", - "__meta_kubernetes_endpoints_name": "testendpoints", - "__meta_kubernetes_service_label_app": "app1", - "__meta_kubernetes_service_labelpresent_app": "true", - "__meta_kubernetes_service_name": 
"testendpoints", + "__meta_kubernetes_namespace": "ns1", + "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_endpoints_annotation_test_annotation": "test", + "__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true", + "__meta_kubernetes_service_label_app": "app1", + "__meta_kubernetes_service_labelpresent_app": "true", + "__meta_kubernetes_service_name": "testendpoints", }, Source: "endpoints/ns1/testendpoints", }, @@ -901,8 +916,10 @@ func TestEndpointsDiscoveryOwnNamespace(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_namespace": "own-ns", - "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_namespace": "own-ns", + "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_endpoints_annotation_test_annotation": "test", + "__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true", }, Source: "endpoints/own-ns/testendpoints", }, diff --git a/discovery/kubernetes/endpointslice_test.go b/discovery/kubernetes/endpointslice_test.go index 42e64a0567..5bc6bd41d2 100644 --- a/discovery/kubernetes/endpointslice_test.go +++ b/discovery/kubernetes/endpointslice_test.go @@ -52,6 +52,9 @@ func makeEndpointSliceV1() *v1.EndpointSlice { Labels: map[string]string{ v1.LabelServiceName: "testendpoints", }, + Annotations: map[string]string{ + "test.annotation": "test", + }, }, AddressType: v1.AddressTypeIPv4, Ports: []v1.EndpointPort{ @@ -114,6 +117,9 @@ func makeEndpointSliceV1beta1() *v1beta1.EndpointSlice { Labels: map[string]string{ v1beta1.LabelServiceName: "testendpoints", }, + Annotations: map[string]string{ + "test.annotation": "test", + }, }, AddressType: v1beta1.AddressTypeIPv4, Ports: []v1beta1.EndpointPort{ @@ -224,6 +230,8 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", 
"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", }, Source: "endpointslice/default/testendpoints", }, @@ -287,6 +295,8 @@ func TestEndpointSliceDiscoveryBeforeRunV1beta1(t *testing.T) { "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", }, Source: "endpointslice/default/testendpoints", }, @@ -486,6 +496,8 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) { "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", "__meta_kubernetes_namespace": "default", }, }, @@ -584,6 +596,8 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) { "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", "__meta_kubernetes_namespace": "default", }, }, @@ -671,6 +685,8 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) { "__meta_kubernetes_endpointslice_name": "testendpoints", 
"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", "__meta_kubernetes_namespace": "default", }, Source: "endpointslice/default/testendpoints", @@ -753,6 +769,8 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) { "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", "__meta_kubernetes_namespace": "default", "__meta_kubernetes_service_label_app_name": "test", "__meta_kubernetes_service_labelpresent_app_name": "true", @@ -851,6 +869,8 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) { "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", "__meta_kubernetes_namespace": "default", "__meta_kubernetes_service_label_app_name": "svc", "__meta_kubernetes_service_label_component": "testing", @@ -945,6 +965,8 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + 
"__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", "__meta_kubernetes_namespace": "default", "__meta_kubernetes_service_label_app_name": "test", "__meta_kubernetes_service_labelpresent_app_name": "true", @@ -1043,6 +1065,8 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) { "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", "__meta_kubernetes_namespace": "default", "__meta_kubernetes_service_label_app_name": "test", "__meta_kubernetes_service_labelpresent_app_name": "true", @@ -1181,6 +1205,8 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) { "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", "__meta_kubernetes_namespace": "ns1", "__meta_kubernetes_service_label_app": "app1", "__meta_kubernetes_service_labelpresent_app": "true", @@ -1328,6 +1354,8 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) { "__meta_kubernetes_namespace": "own-ns", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", }, Source: 
"endpointslice/own-ns/testendpoints", }, diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index b0b587e02a..a528be7b74 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -1970,6 +1970,8 @@ Available meta labels: * `__meta_kubernetes_endpoints_name`: The names of the endpoints object. * `__meta_kubernetes_endpoints_label_`: Each label from the endpoints object. * `__meta_kubernetes_endpoints_labelpresent_`: `true` for each label from the endpoints object. +* `__meta_kubernetes_endpoints_annotation_`: Each annotation from the endpoints object. +* `__meta_kubernetes_endpoints_annotationpresent_`: `true` for each annotation from the endpoints object. * For all targets discovered directly from the endpoints list (those not additionally inferred from underlying pods), the following labels are attached: * `__meta_kubernetes_endpoint_hostname`: Hostname of the endpoint. @@ -1992,6 +1994,10 @@ Available meta labels: * `__meta_kubernetes_namespace`: The namespace of the endpoints object. * `__meta_kubernetes_endpointslice_name`: The name of endpointslice object. +* `__meta_kubernetes_endpointslice_label_`: Each label from the endpointslice object. +* `__meta_kubernetes_endpointslice_labelpresent_`: `true` for each label from the endpointslice object. +* `__meta_kubernetes_endpointslice_annotation_`: Each annotation from the endpointslice object. +* `__meta_kubernetes_endpointslice_annotationpresent_`: `true` for each annotation from the endpointslice object. * For all targets discovered directly from the endpointslice list (those not additionally inferred from underlying pods), the following labels are attached: * `__meta_kubernetes_endpointslice_address_target_kind`: Kind of the referenced object. 
From b2c5de2e65aeb5c3faba24538bf0290f3ed75f57 Mon Sep 17 00:00:00 2001 From: "renzheng.wang" Date: Sat, 25 Jun 2022 22:40:47 +0800 Subject: [PATCH 16/40] fix lint issue Signed-off-by: renzheng.wang --- discovery/kubernetes/endpointslice.go | 1 - discovery/kubernetes/kubernetes.go | 3 ++- discovery/kubernetes/service.go | 19 +++++++------------ 3 files changed, 9 insertions(+), 14 deletions(-) diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index ed23a95b33..a8b492f47c 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -252,7 +252,6 @@ func endpointSliceSourceFromNamespaceAndName(namespace, name string) string { } const ( - endpointSliceNameLabel = metaLabelPrefix + "endpointslice_name" endpointSliceAddressTypeLabel = metaLabelPrefix + "endpointslice_address_type" endpointSlicePortNameLabel = metaLabelPrefix + "endpointslice_port_name" endpointSlicePortProtocolLabel = metaLabelPrefix + "endpointslice_port_protocol" diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go index d45ac41934..ca5ee49e28 100644 --- a/discovery/kubernetes/kubernetes.go +++ b/discovery/kubernetes/kubernetes.go @@ -17,13 +17,14 @@ import ( "context" "errors" "fmt" - "github.com/prometheus/prometheus/util/strutil" "os" "reflect" "strings" "sync" "time" + "github.com/prometheus/prometheus/util/strutil" + disv1beta1 "k8s.io/api/discovery/v1beta1" "github.com/go-kit/log" diff --git a/discovery/kubernetes/service.go b/discovery/kubernetes/service.go index 96cac33659..7addf0054e 100644 --- a/discovery/kubernetes/service.go +++ b/discovery/kubernetes/service.go @@ -146,18 +146,13 @@ func serviceSourceFromNamespaceAndName(namespace, name string) string { } const ( - serviceNameLabel = metaLabelPrefix + "service_name" - serviceLabelPrefix = metaLabelPrefix + "service_label_" - serviceLabelPresentPrefix = metaLabelPrefix + "service_labelpresent_" - serviceAnnotationPrefix = 
metaLabelPrefix + "service_annotation_" - serviceAnnotationPresentPrefix = metaLabelPrefix + "service_annotationpresent_" - servicePortNameLabel = metaLabelPrefix + "service_port_name" - servicePortNumberLabel = metaLabelPrefix + "service_port_number" - servicePortProtocolLabel = metaLabelPrefix + "service_port_protocol" - serviceClusterIPLabel = metaLabelPrefix + "service_cluster_ip" - serviceLoadBalancerIP = metaLabelPrefix + "service_loadbalancer_ip" - serviceExternalNameLabel = metaLabelPrefix + "service_external_name" - serviceType = metaLabelPrefix + "service_type" + servicePortNameLabel = metaLabelPrefix + "service_port_name" + servicePortNumberLabel = metaLabelPrefix + "service_port_number" + servicePortProtocolLabel = metaLabelPrefix + "service_port_protocol" + serviceClusterIPLabel = metaLabelPrefix + "service_cluster_ip" + serviceLoadBalancerIP = metaLabelPrefix + "service_loadbalancer_ip" + serviceExternalNameLabel = metaLabelPrefix + "service_external_name" + serviceType = metaLabelPrefix + "service_type" ) func serviceLabels(svc *apiv1.Service) model.LabelSet { From 5255bf06ad0816f2823560b21aeb70dd7b2046a6 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sun, 2 Jul 2023 22:16:26 +0000 Subject: [PATCH 17/40] Replace sort.Slice with faster slices.SortFunc The generic version is more efficient. 
Signed-off-by: Bryan Boreham --- cmd/promtool/tsdb.go | 4 ++-- rules/manager.go | 17 ++++++++--------- scrape/scrape.go | 6 +++--- storage/remote/read_handler.go | 6 +++--- tsdb/compact.go | 10 +++++----- tsdb/db.go | 14 +++++++------- tsdb/exemplar.go | 6 +++--- tsdb/head_read.go | 4 ++-- tsdb/index/postings.go | 8 ++++---- tsdb/index/postingsstats.go | 7 ++++--- tsdb/wlog/checkpoint.go | 6 +++--- tsdb/wlog/wlog.go | 6 +++--- 12 files changed, 47 insertions(+), 47 deletions(-) diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index b7fad5fe09..844a387c48 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -23,7 +23,6 @@ import ( "path/filepath" "runtime" "runtime/pprof" - "sort" "strconv" "strings" "sync" @@ -34,6 +33,7 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/index" + "golang.org/x/exp/slices" "github.com/alecthomas/units" "github.com/go-kit/log" @@ -447,7 +447,7 @@ func analyzeBlock(path, blockID string, limit int, runExtended bool) error { postingInfos := []postingInfo{} printInfo := func(postingInfos []postingInfo) { - sort.Slice(postingInfos, func(i, j int) bool { return postingInfos[i].metric > postingInfos[j].metric }) + slices.SortFunc(postingInfos, func(a, b postingInfo) bool { return a.metric > b.metric }) for i, pc := range postingInfos { if i >= limit { diff --git a/rules/manager.go b/rules/manager.go index 31c90e9e9d..4f848a74b1 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -19,7 +19,6 @@ import ( "fmt" "math" "net/url" - "sort" "sync" "time" @@ -30,6 +29,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/rulefmt" @@ -490,10 +490,9 @@ func (g *Group) AlertingRules() []*AlertingRule { alerts = append(alerts, alertingRule) } } - sort.Slice(alerts, 
func(i, j int) bool { - return alerts[i].State() > alerts[j].State() || - (alerts[i].State() == alerts[j].State() && - alerts[i].Name() < alerts[j].Name()) + slices.SortFunc(alerts, func(a, b *AlertingRule) bool { + return a.State() > b.State() || + (a.State() == b.State() && a.Name() < b.Name()) }) return alerts } @@ -1189,11 +1188,11 @@ func (m *Manager) RuleGroups() []*Group { rgs = append(rgs, g) } - sort.Slice(rgs, func(i, j int) bool { - if rgs[i].file != rgs[j].file { - return rgs[i].file < rgs[j].file + slices.SortFunc(rgs, func(a, b *Group) bool { + if a.file != b.file { + return a.file < b.file } - return rgs[i].name < rgs[j].name + return a.name < b.name }) return rgs diff --git a/scrape/scrape.go b/scrape/scrape.go index 8c4cc51e74..ec65c5ad9b 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -23,7 +23,6 @@ import ( "math" "net/http" "reflect" - "sort" "strconv" "sync" "time" @@ -35,6 +34,7 @@ import ( config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -720,8 +720,8 @@ func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*re } func resolveConflictingExposedLabels(lb *labels.Builder, conflictingExposedLabels []labels.Label) { - sort.SliceStable(conflictingExposedLabels, func(i, j int) bool { - return len(conflictingExposedLabels[i].Name) < len(conflictingExposedLabels[j].Name) + slices.SortStableFunc(conflictingExposedLabels, func(a, b labels.Label) bool { + return len(a.Name) < len(b.Name) }) for _, l := range conflictingExposedLabels { diff --git a/storage/remote/read_handler.go b/storage/remote/read_handler.go index 116eb9596c..aca4d7dd57 100644 --- a/storage/remote/read_handler.go +++ b/storage/remote/read_handler.go @@ -16,12 +16,12 @@ package remote import ( "context" "net/http" - "sort" "sync" "github.com/go-kit/log" 
"github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" @@ -92,8 +92,8 @@ func (h *readHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { Value: value, }) } - sort.Slice(sortedExternalLabels, func(i, j int) bool { - return sortedExternalLabels[i].Name < sortedExternalLabels[j].Name + slices.SortFunc(sortedExternalLabels, func(a, b prompb.Label) bool { + return a.Name < b.Name }) responseType, err := NegotiateResponseType(req.AcceptedResponseTypes) diff --git a/tsdb/compact.go b/tsdb/compact.go index e2b6f4c5ef..0d42f627fb 100644 --- a/tsdb/compact.go +++ b/tsdb/compact.go @@ -20,7 +20,6 @@ import ( "io" "os" "path/filepath" - "sort" "time" "github.com/go-kit/log" @@ -28,6 +27,7 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -200,8 +200,8 @@ func (c *LeveledCompactor) Plan(dir string) ([]string, error) { } func (c *LeveledCompactor) plan(dms []dirMeta) ([]string, error) { - sort.Slice(dms, func(i, j int) bool { - return dms[i].meta.MinTime < dms[j].meta.MinTime + slices.SortFunc(dms, func(a, b dirMeta) bool { + return a.meta.MinTime < b.meta.MinTime }) res := c.selectOverlappingDirs(dms) @@ -380,8 +380,8 @@ func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta { for s := range sources { res.Compaction.Sources = append(res.Compaction.Sources, s) } - sort.Slice(res.Compaction.Sources, func(i, j int) bool { - return res.Compaction.Sources[i].Compare(res.Compaction.Sources[j]) < 0 + slices.SortFunc(res.Compaction.Sources, func(a, b ulid.ULID) bool { + return a.Compare(b) < 0 }) res.MinTime = mint diff --git a/tsdb/db.go b/tsdb/db.go index 32dae57a52..62359a737e 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -22,7 +22,6 @@ 
import ( "math" "os" "path/filepath" - "sort" "strconv" "strings" "sync" @@ -34,6 +33,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" + "golang.org/x/exp/slices" "golang.org/x/sync/errgroup" "github.com/prometheus/prometheus/config" @@ -579,8 +579,8 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) { return nil, nil } - sort.Slice(loadable, func(i, j int) bool { - return loadable[i].Meta().MinTime < loadable[j].Meta().MinTime + slices.SortFunc(loadable, func(a, b *Block) bool { + return a.Meta().MinTime < b.Meta().MinTime }) blockMetas := make([]BlockMeta, 0, len(loadable)) @@ -1445,8 +1445,8 @@ func (db *DB) reloadBlocks() (err error) { } db.metrics.blocksBytes.Set(float64(blocksSize)) - sort.Slice(toLoad, func(i, j int) bool { - return toLoad[i].Meta().MinTime < toLoad[j].Meta().MinTime + slices.SortFunc(toLoad, func(a, b *Block) bool { + return a.Meta().MinTime < b.Meta().MinTime }) // Swap new blocks first for subsequently created readers to be seen. @@ -1515,8 +1515,8 @@ func deletableBlocks(db *DB, blocks []*Block) map[ulid.ULID]struct{} { // Sort the blocks by time - newest to oldest (largest to smallest timestamp). // This ensures that the retentions will remove the oldest blocks. 
- sort.Slice(blocks, func(i, j int) bool { - return blocks[i].Meta().MaxTime > blocks[j].Meta().MaxTime + slices.SortFunc(blocks, func(a, b *Block) bool { + return a.Meta().MaxTime > b.Meta().MaxTime }) for _, block := range blocks { diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index 01718bb57d..bf401da3ce 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -15,11 +15,11 @@ package tsdb import ( "context" - "sort" "sync" "unicode/utf8" "github.com/prometheus/client_golang/prometheus" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" @@ -185,8 +185,8 @@ func (ce *CircularExemplarStorage) Select(start, end int64, matchers ...[]*label } } - sort.Slice(ret, func(i, j int) bool { - return labels.Compare(ret[i].SeriesLabels, ret[j].SeriesLabels) < 0 + slices.SortFunc(ret, func(a, b exemplar.QueryResult) bool { + return labels.Compare(a.SeriesLabels, b.SeriesLabels) < 0 }) return ret, nil diff --git a/tsdb/head_read.go b/tsdb/head_read.go index 0e6e005ea2..c7146026ad 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -137,8 +137,8 @@ func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings { return index.ErrPostings(errors.Wrap(err, "expand postings")) } - sort.Slice(series, func(i, j int) bool { - return labels.Compare(series[i].lset, series[j].lset) < 0 + slices.SortFunc(series, func(a, b *memSeries) bool { + return labels.Compare(a.lset, b.lset) < 0 }) // Convert back to list. 
diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 2ac6edbdca..3be8a1997f 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -107,11 +107,11 @@ func (p *MemPostings) SortedKeys() []labels.Label { } p.mtx.RUnlock() - sort.Slice(keys, func(i, j int) bool { - if keys[i].Name != keys[j].Name { - return keys[i].Name < keys[j].Name + slices.SortFunc(keys, func(a, b labels.Label) bool { + if a.Name != b.Name { + return a.Name < b.Name } - return keys[i].Value < keys[j].Value + return a.Value < b.Value }) return keys } diff --git a/tsdb/index/postingsstats.go b/tsdb/index/postingsstats.go index 6b29bddabf..8e5f62dbac 100644 --- a/tsdb/index/postingsstats.go +++ b/tsdb/index/postingsstats.go @@ -15,7 +15,8 @@ package index import ( "math" - "sort" + + "golang.org/x/exp/slices" ) // Stat holds values for a single cardinality statistic. @@ -62,8 +63,8 @@ func (m *maxHeap) push(item Stat) { } func (m *maxHeap) get() []Stat { - sort.Slice(m.Items, func(i, j int) bool { - return m.Items[i].Count > m.Items[j].Count + slices.SortFunc(m.Items, func(a, b Stat) bool { + return a.Count > b.Count }) return m.Items } diff --git a/tsdb/wlog/checkpoint.go b/tsdb/wlog/checkpoint.go index 42b03f48fb..fe9952a303 100644 --- a/tsdb/wlog/checkpoint.go +++ b/tsdb/wlog/checkpoint.go @@ -20,13 +20,13 @@ import ( "math" "os" "path/filepath" - "sort" "strconv" "strings" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/pkg/errors" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/tsdb/chunks" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" @@ -374,8 +374,8 @@ func listCheckpoints(dir string) (refs []checkpointRef, err error) { refs = append(refs, checkpointRef{name: fi.Name(), index: idx}) } - sort.Slice(refs, func(i, j int) bool { - return refs[i].index < refs[j].index + slices.SortFunc(refs, func(a, b checkpointRef) bool { + return a.index < b.index }) return refs, nil diff --git a/tsdb/wlog/wlog.go b/tsdb/wlog/wlog.go index 
e38cb94cbd..b7b1623f90 100644 --- a/tsdb/wlog/wlog.go +++ b/tsdb/wlog/wlog.go @@ -22,7 +22,6 @@ import ( "io" "os" "path/filepath" - "sort" "strconv" "sync" "time" @@ -32,6 +31,7 @@ import ( "github.com/golang/snappy" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/tsdb/fileutil" ) @@ -859,8 +859,8 @@ func listSegments(dir string) (refs []segmentRef, err error) { } refs = append(refs, segmentRef{name: fn, index: k}) } - sort.Slice(refs, func(i, j int) bool { - return refs[i].index < refs[j].index + slices.SortFunc(refs, func(a, b segmentRef) bool { + return a.index < b.index }) for i := 0; i < len(refs)-1; i++ { if refs[i].index+1 != refs[i+1].index { From 4edf8999da266339c0a27172976de5fd0de53596 Mon Sep 17 00:00:00 2001 From: SuperQ Date: Thu, 6 Jul 2023 10:59:03 +0200 Subject: [PATCH 18/40] Update promu Update promu to support riscv64. Signed-off-by: SuperQ --- Makefile.common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile.common b/Makefile.common index 787feff089..0ce7ea4612 100644 --- a/Makefile.common +++ b/Makefile.common @@ -55,7 +55,7 @@ ifneq ($(shell command -v gotestsum > /dev/null),) endif endif -PROMU_VERSION ?= 0.14.0 +PROMU_VERSION ?= 0.15.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz SKIP_GOLANGCI_LINT := From 7cc4292328e69c8c7c51ead620609ca26b735f6e Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Thu, 6 Jul 2023 17:48:13 +0200 Subject: [PATCH 19/40] Export MinTime and MaxTime Signed-off-by: Marco Pracucci --- web/api/v1/api.go | 37 +++++++++++++++++++++---------------- web/api/v1/api_test.go | 16 ++++++++-------- 2 files changed, 29 insertions(+), 24 deletions(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 8a6e6efdd5..fe343aab13 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -561,11 +561,11 @@ func (api *API) 
queryRange(r *http.Request) (result apiFuncResult) { } func (api *API) queryExemplars(r *http.Request) apiFuncResult { - start, err := parseTimeParam(r, "start", minTime) + start, err := parseTimeParam(r, "start", MinTime) if err != nil { return invalidParamError(err, "start") } - end, err := parseTimeParam(r, "end", maxTime) + end, err := parseTimeParam(r, "end", MaxTime) if err != nil { return invalidParamError(err, "end") } @@ -625,11 +625,11 @@ func returnAPIError(err error) *apiError { } func (api *API) labelNames(r *http.Request) apiFuncResult { - start, err := parseTimeParam(r, "start", minTime) + start, err := parseTimeParam(r, "start", MinTime) if err != nil { return invalidParamError(err, "start") } - end, err := parseTimeParam(r, "end", maxTime) + end, err := parseTimeParam(r, "end", MaxTime) if err != nil { return invalidParamError(err, "end") } @@ -691,11 +691,11 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { return apiFuncResult{nil, &apiError{errorBadData, errors.Errorf("invalid label name: %q", name)}, nil, nil} } - start, err := parseTimeParam(r, "start", minTime) + start, err := parseTimeParam(r, "start", MinTime) if err != nil { return invalidParamError(err, "start") } - end, err := parseTimeParam(r, "end", maxTime) + end, err := parseTimeParam(r, "end", MaxTime) if err != nil { return invalidParamError(err, "end") } @@ -760,11 +760,16 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { } var ( - minTime = time.Unix(math.MinInt64/1000+62135596801, 0).UTC() - maxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC() + // MinTime is the default timestamp used for the begin of optional time ranges. + // Exposed to let downstream projects to reference it. 
+ MinTime = time.Unix(math.MinInt64/1000+62135596801, 0).UTC() - minTimeFormatted = minTime.Format(time.RFC3339Nano) - maxTimeFormatted = maxTime.Format(time.RFC3339Nano) + // MaxTime is the default timestamp used for the end of optional time ranges. + // Exposed to let downstream projects to reference it. + MaxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC() + + minTimeFormatted = MinTime.Format(time.RFC3339Nano) + maxTimeFormatted = MaxTime.Format(time.RFC3339Nano) ) func (api *API) series(r *http.Request) (result apiFuncResult) { @@ -775,11 +780,11 @@ func (api *API) series(r *http.Request) (result apiFuncResult) { return apiFuncResult{nil, &apiError{errorBadData, errors.New("no match[] parameter provided")}, nil, nil} } - start, err := parseTimeParam(r, "start", minTime) + start, err := parseTimeParam(r, "start", MinTime) if err != nil { return invalidParamError(err, "start") } - end, err := parseTimeParam(r, "end", maxTime) + end, err := parseTimeParam(r, "end", MaxTime) if err != nil { return invalidParamError(err, "end") } @@ -1579,11 +1584,11 @@ func (api *API) deleteSeries(r *http.Request) apiFuncResult { return apiFuncResult{nil, &apiError{errorBadData, errors.New("no match[] parameter provided")}, nil, nil} } - start, err := parseTimeParam(r, "start", minTime) + start, err := parseTimeParam(r, "start", MinTime) if err != nil { return invalidParamError(err, "start") } - end, err := parseTimeParam(r, "end", maxTime) + end, err := parseTimeParam(r, "end", MaxTime) if err != nil { return invalidParamError(err, "end") } @@ -1738,9 +1743,9 @@ func parseTime(s string) (time.Time, error) { // Upstream issue: https://github.com/golang/go/issues/20555 switch s { case minTimeFormatted: - return minTime, nil + return MinTime, nil case maxTimeFormatted: - return maxTime, nil + return MaxTime, nil } return time.Time{}, errors.Errorf("cannot parse %q to a valid timestamp", s) } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 
3aa10ee449..9105ce3362 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -3059,7 +3059,7 @@ func TestParseTimeParam(t *testing.T) { { // When data is valid. paramName: "start", paramValue: "1582468023986", - defaultValue: minTime, + defaultValue: MinTime, result: resultType{ asTime: ts, asError: nil, @@ -3068,16 +3068,16 @@ func TestParseTimeParam(t *testing.T) { { // When data is empty string. paramName: "end", paramValue: "", - defaultValue: maxTime, + defaultValue: MaxTime, result: resultType{ - asTime: maxTime, + asTime: MaxTime, asError: nil, }, }, { // When data is not valid. paramName: "foo", paramValue: "baz", - defaultValue: maxTime, + defaultValue: MaxTime, result: resultType{ asTime: time.Time{}, asError: func() error { @@ -3148,12 +3148,12 @@ func TestParseTime(t *testing.T) { result: time.Unix(1543578564, 705*1e6), }, { - input: minTime.Format(time.RFC3339Nano), - result: minTime, + input: MinTime.Format(time.RFC3339Nano), + result: MinTime, }, { - input: maxTime.Format(time.RFC3339Nano), - result: maxTime, + input: MaxTime.Format(time.RFC3339Nano), + result: MaxTime, }, } From 578e2b6a3f70384cb7908e8bc7d50319e59ac199 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sat, 8 Jul 2023 12:39:33 +0000 Subject: [PATCH 20/40] re-order imports for linter Signed-off-by: Bryan Boreham --- cmd/promtool/tsdb.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index 844a387c48..4e27f69c05 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -29,20 +29,19 @@ import ( "text/tabwriter" "time" - "github.com/prometheus/prometheus/promql/parser" - "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/prometheus/prometheus/tsdb/index" - "golang.org/x/exp/slices" - "github.com/alecthomas/units" "github.com/go-kit/log" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" + 
"github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" + "github.com/prometheus/prometheus/tsdb/index" ) const timeDelta = 30000 From ce153e3fffde388712629d205663dd4af55013db Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sat, 8 Jul 2023 12:45:56 +0000 Subject: [PATCH 21/40] Replace sort.Sort with faster slices.SortFunc The generic version is more efficient. Signed-off-by: Bryan Boreham --- promql/quantile.go | 10 +++++----- storage/remote/codec.go | 11 ++++------- tsdb/head_read.go | 2 +- tsdb/index/index.go | 8 +------- tsdb/ooo_head_read.go | 31 +++++++++++-------------------- tsdb/ooo_head_read_test.go | 7 ++++--- util/stats/timer.go | 33 ++++++++------------------------- web/federate.go | 19 ++++++------------- 8 files changed, 40 insertions(+), 81 deletions(-) diff --git a/promql/quantile.go b/promql/quantile.go index 78d0bbaf0c..d80345e817 100644 --- a/promql/quantile.go +++ b/promql/quantile.go @@ -17,6 +17,8 @@ import ( "math" "sort" + "golang.org/x/exp/slices" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" ) @@ -38,10 +40,6 @@ type bucket struct { // buckets implements sort.Interface. 
type buckets []bucket -func (b buckets) Len() int { return len(b) } -func (b buckets) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b buckets) Less(i, j int) bool { return b[i].upperBound < b[j].upperBound } - type metricWithBuckets struct { metric labels.Labels buckets buckets @@ -83,7 +81,9 @@ func bucketQuantile(q float64, buckets buckets) float64 { if q > 1 { return math.Inf(+1) } - sort.Sort(buckets) + slices.SortFunc(buckets, func(a, b bucket) bool { + return a.upperBound < b.upperBound + }) if !math.IsInf(buckets[len(buckets)-1].upperBound, +1) { return math.NaN() } diff --git a/storage/remote/codec.go b/storage/remote/codec.go index 6a58ec4ac6..3f426204e4 100644 --- a/storage/remote/codec.go +++ b/storage/remote/codec.go @@ -26,6 +26,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/common/model" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" @@ -178,7 +179,9 @@ func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet } if sortSeries { - sort.Sort(byLabel(series)) + slices.SortFunc(series, func(a, b storage.Series) bool { + return labels.Compare(a.Labels(), b.Labels()) < 0 + }) } return &concreteSeriesSet{ series: series, @@ -313,12 +316,6 @@ func MergeLabels(primary, secondary []prompb.Label) []prompb.Label { return result } -type byLabel []storage.Series - -func (a byLabel) Len() int { return len(a) } -func (a byLabel) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byLabel) Less(i, j int) bool { return labels.Compare(a[i].Labels(), a[j].Labels()) < 0 } - // errSeriesSet implements storage.SeriesSet, just returning an error. 
type errSeriesSet struct { err error diff --git a/tsdb/head_read.go b/tsdb/head_read.go index 0e6e005ea2..8d0a35fc25 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -450,7 +450,7 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper // Next we want to sort all the collected chunks by min time so we can find // those that overlap and stop when we know the rest don't. - sort.Sort(byMinTimeAndMinRef(tmpChks)) + slices.SortFunc(tmpChks, refLessByMinTimeAndMinRef) mc := &mergedOOOChunks{} absoluteMax := int64(math.MinInt64) diff --git a/tsdb/index/index.go b/tsdb/index/index.go index ef2d167dc8..d7650a8b33 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -924,7 +924,7 @@ func (w *Writer) writePostingsToTmpFiles() error { values = append(values, v) } // Symbol numbers are in order, so the strings will also be in order. - sort.Sort(uint32slice(values)) + slices.Sort(values) for _, v := range values { value, err := w.symbols.Lookup(v) if err != nil { @@ -1017,12 +1017,6 @@ func (w *Writer) writePostings() error { return nil } -type uint32slice []uint32 - -func (s uint32slice) Len() int { return len(s) } -func (s uint32slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s uint32slice) Less(i, j int) bool { return s[i] < s[j] } - type labelIndexHashEntry struct { keys []string offset uint64 diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index 8ba3ea39af..8030fc367f 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -17,7 +17,8 @@ package tsdb import ( "errors" "math" - "sort" + + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" @@ -130,7 +131,7 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra // Next we want to sort all the collected chunks by min time so we can find // those that overlap. 
- sort.Sort(metaByMinTimeAndMinRef(tmpChks)) + slices.SortFunc(tmpChks, lessByMinTimeAndMinRef) // Next we want to iterate the sorted collected chunks and only return the // chunks Meta the first chunk that overlaps with others. @@ -175,30 +176,20 @@ type chunkMetaAndChunkDiskMapperRef struct { origMaxT int64 } -type byMinTimeAndMinRef []chunkMetaAndChunkDiskMapperRef - -func (b byMinTimeAndMinRef) Len() int { return len(b) } -func (b byMinTimeAndMinRef) Less(i, j int) bool { - if b[i].meta.MinTime == b[j].meta.MinTime { - return b[i].meta.Ref < b[j].meta.Ref +func refLessByMinTimeAndMinRef(a, b chunkMetaAndChunkDiskMapperRef) bool { + if a.meta.MinTime == b.meta.MinTime { + return a.meta.Ref < b.meta.Ref } - return b[i].meta.MinTime < b[j].meta.MinTime + return a.meta.MinTime < b.meta.MinTime } -func (b byMinTimeAndMinRef) Swap(i, j int) { b[i], b[j] = b[j], b[i] } - -type metaByMinTimeAndMinRef []chunks.Meta - -func (b metaByMinTimeAndMinRef) Len() int { return len(b) } -func (b metaByMinTimeAndMinRef) Less(i, j int) bool { - if b[i].MinTime == b[j].MinTime { - return b[i].Ref < b[j].Ref +func lessByMinTimeAndMinRef(a, b chunks.Meta) bool { + if a.MinTime == b.MinTime { + return a.Ref < b.Ref } - return b[i].MinTime < b[j].MinTime + return a.MinTime < b.MinTime } -func (b metaByMinTimeAndMinRef) Swap(i, j int) { b[i], b[j] = b[j], b[i] } - func (oh *OOOHeadIndexReader) Postings(name string, values ...string) (index.Postings, error) { switch len(values) { case 0: diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index f3ec862f5b..ed9ca27690 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -22,6 +22,7 @@ import ( "time" "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" @@ -337,7 +338,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { } expChunks = append(expChunks, meta) } - 
sort.Sort(metaByMinTimeAndMinRef(expChunks)) // we always want the chunks to come back sorted by minTime asc + slices.SortFunc(expChunks, lessByMinTimeAndMinRef) // We always want the chunks to come back sorted by minTime asc. if headChunk && len(intervals) > 0 { // Put the last interval in the head chunk @@ -1116,7 +1117,7 @@ func TestSortByMinTimeAndMinRef(t *testing.T) { for _, tc := range tests { t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) { - sort.Sort(byMinTimeAndMinRef(tc.input)) + slices.SortFunc(tc.input, refLessByMinTimeAndMinRef) require.Equal(t, tc.exp, tc.input) }) } @@ -1180,7 +1181,7 @@ func TestSortMetaByMinTimeAndMinRef(t *testing.T) { for _, tc := range tests { t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) { - sort.Sort(metaByMinTimeAndMinRef(tc.inputMetas)) + slices.SortFunc(tc.inputMetas, lessByMinTimeAndMinRef) require.Equal(t, tc.expMetas, tc.inputMetas) }) } diff --git a/util/stats/timer.go b/util/stats/timer.go index e47162680e..df1e2f931c 100644 --- a/util/stats/timer.go +++ b/util/stats/timer.go @@ -16,8 +16,9 @@ package stats import ( "bytes" "fmt" - "sort" "time" + + "golang.org/x/exp/slices" ) // A Timer that can be started and stopped and accumulates the total time it @@ -78,35 +79,17 @@ func (t *TimerGroup) GetTimer(name fmt.Stringer) *Timer { return timer } -// Timers is a slice of Timer pointers that implements Len and Swap from -// sort.Interface. -type Timers []*Timer - -type byCreationTimeSorter struct{ Timers } - -// Len implements sort.Interface. -func (t Timers) Len() int { - return len(t) -} - -// Swap implements sort.Interface. -func (t Timers) Swap(i, j int) { - t[i], t[j] = t[j], t[i] -} - -func (s byCreationTimeSorter) Less(i, j int) bool { - return s.Timers[i].created < s.Timers[j].created -} - // Return a string representation of a TimerGroup. 
func (t *TimerGroup) String() string { - timers := byCreationTimeSorter{} + timers := make([]*Timer, 0, len(t.timers)) for _, timer := range t.timers { - timers.Timers = append(timers.Timers, timer) + timers = append(timers, timer) } - sort.Sort(timers) + slices.SortFunc(timers, func(a, b *Timer) bool { + return a.created < b.created + }) result := &bytes.Buffer{} - for _, timer := range timers.Timers { + for _, timer := range timers { fmt.Fprintf(result, "%s\n", timer) } return result.String() diff --git a/web/federate.go b/web/federate.go index b0f4c06108..1c50faed06 100644 --- a/web/federate.go +++ b/web/federate.go @@ -25,6 +25,7 @@ import ( dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -166,7 +167,11 @@ Loop: return } - sort.Sort(byName(vec)) + slices.SortFunc(vec, func(a, b promql.Sample) bool { + ni := a.Metric.Get(labels.MetricName) + nj := b.Metric.Get(labels.MetricName) + return ni < nj + }) externalLabels := h.config.GlobalConfig.ExternalLabels.Map() if _, ok := externalLabels[model.InstanceLabel]; !ok { @@ -313,15 +318,3 @@ Loop: } } } - -// byName makes a model.Vector sortable by metric name. 
-type byName promql.Vector - -func (vec byName) Len() int { return len(vec) } -func (vec byName) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } - -func (vec byName) Less(i, j int) bool { - ni := vec[i].Metric.Get(labels.MetricName) - nj := vec[j].Metric.Get(labels.MetricName) - return ni < nj -} From 70e41fc5ac008d623820dcacdbbcbedbd64ca11f Mon Sep 17 00:00:00 2001 From: Merrick Clay Date: Mon, 10 Jul 2023 16:50:16 -0600 Subject: [PATCH 22/40] improve incorrect doc comment Signed-off-by: Merrick Clay --- tsdb/head.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsdb/head.go b/tsdb/head.go index a1d61fd6ab..499be067ad 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -977,7 +977,7 @@ func (h *Head) DisableNativeHistograms() { h.opts.EnableNativeHistograms.Store(false) } -// PostingsCardinalityStats returns top 10 highest cardinality stats By label and value names. +// PostingsCardinalityStats returns highest cardinality stats by label and value names. func (h *Head) PostingsCardinalityStats(statsByLabelName string, limit int) *index.PostingsStats { h.cardinalityMutex.Lock() defer h.cardinalityMutex.Unlock() From bf5bf1a4b3460fb35ef0a93f783a5251317c96f7 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Tue, 11 Jul 2023 14:29:31 +0200 Subject: [PATCH 23/40] TSDB: Remove usused import of sort Signed-off-by: Julien Pivotto --- tsdb/head_read.go | 1 - 1 file changed, 1 deletion(-) diff --git a/tsdb/head_read.go b/tsdb/head_read.go index 55e16ad12f..b2af74ace9 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -16,7 +16,6 @@ package tsdb import ( "context" "math" - "sort" "sync" "github.com/go-kit/log/level" From 32d87282addfaa2af9f7a6b953d17f3da6101d66 Mon Sep 17 00:00:00 2001 From: Justin Lei Date: Tue, 11 Jul 2023 05:57:57 -0700 Subject: [PATCH 24/40] Add Zstandard compression option for wlog (#11666) Snappy remains as the default compression but there is now a flag to switch the compression algorithm. 
Signed-off-by: Justin Lei --- cmd/prometheus/main.go | 13 ++- go.mod | 1 + go.sum | 2 + tsdb/agent/db.go | 10 ++- tsdb/compact_test.go | 7 +- tsdb/db.go | 6 +- tsdb/db_test.go | 12 +-- tsdb/head_test.go | 164 +++++++++++++++++------------------ tsdb/head_wal.go | 2 +- tsdb/ooo_head_read_test.go | 5 +- tsdb/wal.go | 2 +- tsdb/wal_test.go | 4 +- tsdb/wlog/checkpoint.go | 2 +- tsdb/wlog/checkpoint_test.go | 6 +- tsdb/wlog/live_reader.go | 53 ++++++----- tsdb/wlog/reader.go | 36 +++++--- tsdb/wlog/reader_test.go | 14 +-- tsdb/wlog/watcher_test.go | 28 +++--- tsdb/wlog/wlog.go | 90 ++++++++++++++----- tsdb/wlog/wlog_test.go | 52 ++++++----- 20 files changed, 302 insertions(+), 207 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 3d723f1529..debc0d3f9d 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -71,6 +71,7 @@ import ( "github.com/prometheus/prometheus/tracing" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/agent" + "github.com/prometheus/prometheus/tsdb/wlog" "github.com/prometheus/prometheus/util/documentcli" "github.com/prometheus/prometheus/util/logging" prom_runtime "github.com/prometheus/prometheus/util/runtime" @@ -334,6 +335,9 @@ func main() { serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL."). Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression) + serverOnlyFlag(a, "storage.tsdb.wal-compression-type", "Compression algorithm for the tsdb WAL."). + Hidden().Default(string(wlog.CompressionSnappy)).EnumVar(&cfg.tsdb.WALCompressionType, string(wlog.CompressionSnappy), string(wlog.CompressionZstd)) + serverOnlyFlag(a, "storage.tsdb.head-chunks-write-queue-size", "Size of the queue through which head chunks are written to the disk to be m-mapped, 0 disables the queue completely. Experimental."). 
Default("0").IntVar(&cfg.tsdb.HeadChunksWriteQueueSize) @@ -350,6 +354,9 @@ func main() { agentOnlyFlag(a, "storage.agent.wal-compression", "Compress the agent WAL."). Default("true").BoolVar(&cfg.agent.WALCompression) + agentOnlyFlag(a, "storage.agent.wal-compression-type", "Compression algorithm for the agent WAL."). + Hidden().Default(string(wlog.CompressionSnappy)).EnumVar(&cfg.agent.WALCompressionType, string(wlog.CompressionSnappy), string(wlog.CompressionZstd)) + agentOnlyFlag(a, "storage.agent.wal-truncate-frequency", "The frequency at which to truncate the WAL and remove old data."). Hidden().PlaceHolder("").SetValue(&cfg.agent.TruncateFrequency) @@ -1546,6 +1553,7 @@ type tsdbOptions struct { MaxBytes units.Base2Bytes NoLockfile bool WALCompression bool + WALCompressionType string HeadChunksWriteQueueSize int SamplesPerChunk int StripeSize int @@ -1566,7 +1574,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { MaxBytes: int64(opts.MaxBytes), NoLockfile: opts.NoLockfile, AllowOverlappingCompaction: true, - WALCompression: opts.WALCompression, + WALCompression: wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType), HeadChunksWriteQueueSize: opts.HeadChunksWriteQueueSize, SamplesPerChunk: opts.SamplesPerChunk, StripeSize: opts.StripeSize, @@ -1585,6 +1593,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { type agentOptions struct { WALSegmentSize units.Base2Bytes WALCompression bool + WALCompressionType string StripeSize int TruncateFrequency model.Duration MinWALTime, MaxWALTime model.Duration @@ -1594,7 +1603,7 @@ type agentOptions struct { func (opts agentOptions) ToAgentOptions() agent.Options { return agent.Options{ WALSegmentSize: int(opts.WALSegmentSize), - WALCompression: opts.WALCompression, + WALCompression: wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType), StripeSize: opts.StripeSize, TruncateFrequency: time.Duration(opts.TruncateFrequency), MinWALTime: 
durationToInt64Millis(time.Duration(opts.MinWALTime)), diff --git a/go.mod b/go.mod index 48db762913..4597ee4201 100644 --- a/go.mod +++ b/go.mod @@ -34,6 +34,7 @@ require ( github.com/hetznercloud/hcloud-go v1.47.0 github.com/ionos-cloud/sdk-go/v6 v6.1.7 github.com/json-iterator/go v1.1.12 + github.com/klauspost/compress v1.15.12 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b github.com/linode/linodego v1.17.2 github.com/miekg/dns v1.1.54 diff --git a/go.sum b/go.sum index af10deb8b7..57f9f82c69 100644 --- a/go.sum +++ b/go.sum @@ -507,6 +507,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM= +github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 13cad6bfca..d47095a238 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -65,8 +65,8 @@ type Options struct { // WALSegmentSize > 0, segment size is WALSegmentSize. WALSegmentSize int - // WALCompression will turn on Snappy compression for records on the WAL. - WALCompression bool + // WALCompression configures the compression type to use on records in the WAL. + WALCompression wlog.CompressionType // StripeSize is the size (power of 2) in entries of the series hash map. 
Reducing the size will save memory but impact performance. StripeSize int @@ -87,7 +87,7 @@ type Options struct { func DefaultOptions() *Options { return &Options{ WALSegmentSize: wlog.DefaultSegmentSize, - WALCompression: false, + WALCompression: wlog.CompressionNone, StripeSize: tsdb.DefaultStripeSize, TruncateFrequency: DefaultTruncateFrequency, MinWALTime: DefaultMinWALTime, @@ -318,6 +318,10 @@ func validateOptions(opts *Options) *Options { opts.WALSegmentSize = wlog.DefaultSegmentSize } + if opts.WALCompression == "" { + opts.WALCompression = wlog.CompressionNone + } + // Revert Stripesize to DefaultStripsize if Stripsize is either 0 or not a power of 2. if opts.StripeSize <= 0 || ((opts.StripeSize & (opts.StripeSize - 1)) != 0) { opts.StripeSize = tsdb.DefaultStripeSize diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index 27a5cdfa81..d209182689 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -39,6 +39,7 @@ import ( "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/tombstones" "github.com/prometheus/prometheus/tsdb/tsdbutil" + "github.com/prometheus/prometheus/tsdb/wlog" ) func TestSplitByRange(t *testing.T) { @@ -1306,7 +1307,7 @@ func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) { func TestHeadCompactionWithHistograms(t *testing.T) { for _, floatTest := range []bool{true, false} { t.Run(fmt.Sprintf("float=%t", floatTest), func(t *testing.T) { - head, _ := newTestHead(t, DefaultBlockDuration, false, false) + head, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) require.NoError(t, head.Init(0)) t.Cleanup(func() { require.NoError(t, head.Close()) @@ -1485,11 +1486,11 @@ func TestSparseHistogramSpaceSavings(t *testing.T) { c.numBuckets, ), func(t *testing.T) { - oldHead, _ := newTestHead(t, DefaultBlockDuration, false, false) + oldHead, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) t.Cleanup(func() { require.NoError(t, oldHead.Close()) }) 
- sparseHead, _ := newTestHead(t, DefaultBlockDuration, false, false) + sparseHead, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) t.Cleanup(func() { require.NoError(t, sparseHead.Close()) }) diff --git a/tsdb/db.go b/tsdb/db.go index 62359a737e..2ca6034a03 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -77,8 +77,8 @@ func DefaultOptions() *Options { MaxBlockDuration: DefaultBlockDuration, NoLockfile: false, AllowOverlappingCompaction: true, - WALCompression: false, SamplesPerChunk: DefaultSamplesPerChunk, + WALCompression: wlog.CompressionNone, StripeSize: DefaultStripeSize, HeadChunksWriteBufferSize: chunks.DefaultWriteBufferSize, IsolationDisabled: defaultIsolationDisabled, @@ -123,8 +123,8 @@ type Options struct { // For Prometheus, this will always be true. AllowOverlappingCompaction bool - // WALCompression will turn on Snappy compression for records on the WAL. - WALCompression bool + // WALCompression configures the compression type to use on records in the WAL. + WALCompression wlog.CompressionType // Maximum number of CPUs that can simultaneously processes WAL replay. // If it is <=0, then GOMAXPROCS is used. 
diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 427a3b7afa..c746d50187 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -1965,7 +1965,7 @@ func TestInitializeHeadTimestamp(t *testing.T) { dir := t.TempDir() require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777)) - w, err := wlog.New(nil, nil, path.Join(dir, "wal"), false) + w, err := wlog.New(nil, nil, path.Join(dir, "wal"), wlog.CompressionNone) require.NoError(t, err) var enc record.Encoder @@ -2007,7 +2007,7 @@ func TestInitializeHeadTimestamp(t *testing.T) { createBlock(t, dir, genSeries(1, 1, 1000, 6000)) require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777)) - w, err := wlog.New(nil, nil, path.Join(dir, "wal"), false) + w, err := wlog.New(nil, nil, path.Join(dir, "wal"), wlog.CompressionNone) require.NoError(t, err) var enc record.Encoder @@ -2408,7 +2408,7 @@ func TestDBReadOnly(t *testing.T) { } // Add head to test DBReadOnly WAL reading capabilities. - w, err := wlog.New(logger, nil, filepath.Join(dbDir, "wal"), true) + w, err := wlog.New(logger, nil, filepath.Join(dbDir, "wal"), wlog.CompressionSnappy) require.NoError(t, err) h := createHead(t, w, genSeries(1, 1, 16, 18), dbDir) require.NoError(t, h.Close()) @@ -2972,7 +2972,7 @@ func TestCompactHead(t *testing.T) { NoLockfile: true, MinBlockDuration: int64(time.Hour * 2 / time.Millisecond), MaxBlockDuration: int64(time.Hour * 2 / time.Millisecond), - WALCompression: true, + WALCompression: wlog.CompressionSnappy, } db, err := Open(dbDir, log.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil) @@ -3912,7 +3912,7 @@ func TestMetadataCheckpointingOnlyKeepsLatestEntry(t *testing.T) { ctx := context.Background() numSamples := 10000 - hb, w := newTestHead(t, int64(numSamples)*10, false, false) + hb, w := newTestHead(t, int64(numSamples)*10, wlog.CompressionNone, false) // Add some series so we can append metadata to them. 
app := hb.Appender(ctx) @@ -5099,7 +5099,7 @@ func TestWBLAndMmapReplay(t *testing.T) { resetMmapToOriginal() // We neet to reset because new duplicate chunks can be written above. // Removing m-map markers in WBL by rewriting it. - newWbl, err := wlog.New(log.NewNopLogger(), nil, filepath.Join(t.TempDir(), "new_wbl"), false) + newWbl, err := wlog.New(log.NewNopLogger(), nil, filepath.Join(t.TempDir(), "new_wbl"), wlog.CompressionNone) require.NoError(t, err) sr, err := wlog.NewSegmentsReader(originalWblDir) require.NoError(t, err) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 2828106204..f9eb88ba1f 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -52,7 +52,7 @@ import ( "github.com/prometheus/prometheus/tsdb/wlog" ) -func newTestHead(t testing.TB, chunkRange int64, compressWAL, oooEnabled bool) (*Head, *wlog.WL) { +func newTestHead(t testing.TB, chunkRange int64, compressWAL wlog.CompressionType, oooEnabled bool) (*Head, *wlog.WL) { dir := t.TempDir() wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, compressWAL) require.NoError(t, err) @@ -79,7 +79,7 @@ func newTestHead(t testing.TB, chunkRange int64, compressWAL, oooEnabled bool) ( func BenchmarkCreateSeries(b *testing.B) { series := genSeries(b.N, 10, 0, 0) - h, _ := newTestHead(b, 10000, false, false) + h, _ := newTestHead(b, 10000, wlog.CompressionNone, false) b.Cleanup(func() { require.NoError(b, h.Close()) }) @@ -100,7 +100,7 @@ func BenchmarkHeadAppender_Append_Commit_ExistingSeries(b *testing.B) { b.Run(fmt.Sprintf("%d series", seriesCount), func(b *testing.B) { for _, samplesPerAppend := range []int64{1, 2, 5, 100} { b.Run(fmt.Sprintf("%d samples per append", samplesPerAppend), func(b *testing.B) { - h, _ := newTestHead(b, 10000, false, false) + h, _ := newTestHead(b, 10000, wlog.CompressionNone, false) b.Cleanup(func() { require.NoError(b, h.Close()) }) ts := int64(1000) @@ -245,7 +245,7 @@ func BenchmarkLoadWAL(b *testing.B) { func(b *testing.B) { dir := 
b.TempDir() - w, err := wlog.New(nil, nil, dir, false) + w, err := wlog.New(nil, nil, dir, wlog.CompressionNone) require.NoError(b, err) // Write series. @@ -337,7 +337,7 @@ func BenchmarkLoadWAL(b *testing.B) { // While appending the samples to the head it concurrently queries them from multiple go routines and verifies that the // returned results are correct. func TestHead_HighConcurrencyReadAndWrite(t *testing.T) { - head, _ := newTestHead(t, DefaultBlockDuration, false, false) + head, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) defer func() { require.NoError(t, head.Close()) }() @@ -527,8 +527,8 @@ func TestHead_HighConcurrencyReadAndWrite(t *testing.T) { } func TestHead_ReadWAL(t *testing.T) { - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + for _, compress := range []wlog.CompressionType{wlog.CompressionNone, wlog.CompressionSnappy, wlog.CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { entries := []interface{}{ []record.RefSeries{ {Ref: 10, Labels: labels.FromStrings("a", "1")}, @@ -609,7 +609,7 @@ func TestHead_ReadWAL(t *testing.T) { } func TestHead_WALMultiRef(t *testing.T) { - head, w := newTestHead(t, 1000, false, false) + head, w := newTestHead(t, 1000, wlog.CompressionNone, false) require.NoError(t, head.Init(0)) @@ -644,7 +644,7 @@ func TestHead_WALMultiRef(t *testing.T) { require.NotEqual(t, ref1, ref2, "Refs are the same") require.NoError(t, head.Close()) - w, err = wlog.New(nil, nil, w.Dir(), false) + w, err = wlog.New(nil, nil, w.Dir(), wlog.CompressionNone) require.NoError(t, err) opts := DefaultHeadOptions() @@ -669,7 +669,7 @@ func TestHead_WALMultiRef(t *testing.T) { } func TestHead_ActiveAppenders(t *testing.T) { - head, _ := newTestHead(t, 1000, false, false) + head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer head.Close() require.NoError(t, head.Init(0)) @@ -702,14 +702,14 @@ func 
TestHead_ActiveAppenders(t *testing.T) { } func TestHead_UnknownWALRecord(t *testing.T) { - head, w := newTestHead(t, 1000, false, false) + head, w := newTestHead(t, 1000, wlog.CompressionNone, false) w.Log([]byte{255, 42}) require.NoError(t, head.Init(0)) require.NoError(t, head.Close()) } func TestHead_Truncate(t *testing.T) { - h, _ := newTestHead(t, 1000, false, false) + h, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, h.Close()) }() @@ -847,8 +847,8 @@ func TestMemSeries_truncateChunks(t *testing.T) { } func TestHeadDeleteSeriesWithoutSamples(t *testing.T) { - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + for _, compress := range []wlog.CompressionType{wlog.CompressionNone, wlog.CompressionSnappy, wlog.CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { entries := []interface{}{ []record.RefSeries{ {Ref: 10, Labels: labels.FromStrings("a", "1")}, @@ -927,8 +927,8 @@ func TestHeadDeleteSimple(t *testing.T) { }, } - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + for _, compress := range []wlog.CompressionType{wlog.CompressionNone, wlog.CompressionSnappy, wlog.CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { for _, c := range cases { head, w := newTestHead(t, 1000, compress, false) require.NoError(t, head.Init(0)) @@ -1011,7 +1011,7 @@ func TestHeadDeleteSimple(t *testing.T) { } func TestDeleteUntilCurMax(t *testing.T) { - hb, _ := newTestHead(t, 1000000, false, false) + hb, _ := newTestHead(t, 1000000, wlog.CompressionNone, false) defer func() { require.NoError(t, hb.Close()) }() @@ -1064,7 +1064,7 @@ func TestDeletedSamplesAndSeriesStillInWALAfterCheckpoint(t *testing.T) { numSamples := 10000 // Enough samples to cause a checkpoint. 
- hb, w := newTestHead(t, int64(numSamples)*10, false, false) + hb, w := newTestHead(t, int64(numSamples)*10, wlog.CompressionNone, false) for i := 0; i < numSamples; i++ { app := hb.Appender(context.Background()) @@ -1156,7 +1156,7 @@ func TestDelete_e2e(t *testing.T) { seriesMap[labels.New(l...).String()] = []tsdbutil.Sample{} } - hb, _ := newTestHead(t, 100000, false, false) + hb, _ := newTestHead(t, 100000, wlog.CompressionNone, false) defer func() { require.NoError(t, hb.Close()) }() @@ -1506,7 +1506,7 @@ func TestMemSeries_append_atVariableRate(t *testing.T) { func TestGCChunkAccess(t *testing.T) { // Put a chunk, select it. GC it and then access it. const chunkRange = 1000 - h, _ := newTestHead(t, chunkRange, false, false) + h, _ := newTestHead(t, chunkRange, wlog.CompressionNone, false) defer func() { require.NoError(t, h.Close()) }() @@ -1565,7 +1565,7 @@ func TestGCChunkAccess(t *testing.T) { func TestGCSeriesAccess(t *testing.T) { // Put a series, select it. GC it and then access it. 
const chunkRange = 1000 - h, _ := newTestHead(t, chunkRange, false, false) + h, _ := newTestHead(t, chunkRange, wlog.CompressionNone, false) defer func() { require.NoError(t, h.Close()) }() @@ -1624,7 +1624,7 @@ func TestGCSeriesAccess(t *testing.T) { } func TestUncommittedSamplesNotLostOnTruncate(t *testing.T) { - h, _ := newTestHead(t, 1000, false, false) + h, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, h.Close()) }() @@ -1654,7 +1654,7 @@ func TestUncommittedSamplesNotLostOnTruncate(t *testing.T) { } func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) { - h, _ := newTestHead(t, 1000, false, false) + h, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, h.Close()) }() @@ -1685,8 +1685,8 @@ func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) { } func TestHead_LogRollback(t *testing.T) { - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + for _, compress := range []wlog.CompressionType{wlog.CompressionNone, wlog.CompressionSnappy, wlog.CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { h, w := newTestHead(t, 1000, compress, false) defer func() { require.NoError(t, h.Close()) @@ -1743,8 +1743,8 @@ func TestWalRepair_DecodingError(t *testing.T) { 5, }, } { - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("%s,compress=%t", name, compress), func(t *testing.T) { + for _, compress := range []wlog.CompressionType{wlog.CompressionNone, wlog.CompressionSnappy, wlog.CompressionZstd} { + t.Run(fmt.Sprintf("%s,compress=%s", name, compress), func(t *testing.T) { dir := t.TempDir() // Fill the wal and corrupt it. @@ -1812,7 +1812,7 @@ func TestHeadReadWriterRepair(t *testing.T) { walDir := filepath.Join(dir, "wal") // Fill the chunk segments and corrupt it. 
{ - w, err := wlog.New(nil, nil, walDir, false) + w, err := wlog.New(nil, nil, walDir, wlog.CompressionNone) require.NoError(t, err) opts := DefaultHeadOptions() @@ -1880,7 +1880,7 @@ func TestHeadReadWriterRepair(t *testing.T) { } func TestNewWalSegmentOnTruncate(t *testing.T) { - h, wal := newTestHead(t, 1000, false, false) + h, wal := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, h.Close()) }() @@ -1910,7 +1910,7 @@ func TestNewWalSegmentOnTruncate(t *testing.T) { } func TestAddDuplicateLabelName(t *testing.T) { - h, _ := newTestHead(t, 1000, false, false) + h, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, h.Close()) }() @@ -1993,7 +1993,7 @@ func TestMemSeriesIsolation(t *testing.T) { } // Test isolation without restart of Head. - hb, _ := newTestHead(t, 1000, false, false) + hb, _ := newTestHead(t, 1000, wlog.CompressionNone, false) i := addSamples(hb) testIsolation(hb, i) @@ -2055,11 +2055,11 @@ func TestMemSeriesIsolation(t *testing.T) { require.NoError(t, hb.Close()) // Test isolation with restart of Head. This is to verify the num samples of chunks after m-map chunk replay. - hb, w := newTestHead(t, 1000, false, false) + hb, w := newTestHead(t, 1000, wlog.CompressionNone, false) i = addSamples(hb) require.NoError(t, hb.Close()) - wal, err := wlog.NewSize(nil, nil, w.Dir(), 32768, false) + wal, err := wlog.NewSize(nil, nil, w.Dir(), 32768, wlog.CompressionNone) require.NoError(t, err) opts := DefaultHeadOptions() opts.ChunkRange = 1000 @@ -2108,7 +2108,7 @@ func TestIsolationRollback(t *testing.T) { } // Rollback after a failed append and test if the low watermark has progressed anyway. 
- hb, _ := newTestHead(t, 1000, false, false) + hb, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, hb.Close()) }() @@ -2139,7 +2139,7 @@ func TestIsolationLowWatermarkMonotonous(t *testing.T) { t.Skip("skipping test since tsdb isolation is disabled") } - hb, _ := newTestHead(t, 1000, false, false) + hb, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, hb.Close()) }() @@ -2176,7 +2176,7 @@ func TestIsolationAppendIDZeroIsNoop(t *testing.T) { t.Skip("skipping test since tsdb isolation is disabled") } - h, _ := newTestHead(t, 1000, false, false) + h, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, h.Close()) }() @@ -2207,7 +2207,7 @@ func TestIsolationWithoutAdd(t *testing.T) { t.Skip("skipping test since tsdb isolation is disabled") } - hb, _ := newTestHead(t, 1000, false, false) + hb, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, hb.Close()) }() @@ -2302,7 +2302,7 @@ func TestOutOfOrderSamplesMetric(t *testing.T) { } func testHeadSeriesChunkRace(t *testing.T) { - h, _ := newTestHead(t, 1000, false, false) + h, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, h.Close()) }() @@ -2337,7 +2337,7 @@ func testHeadSeriesChunkRace(t *testing.T) { } func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) { - head, _ := newTestHead(t, 1000, false, false) + head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, head.Close()) }() @@ -2397,7 +2397,7 @@ func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) { } func TestHeadLabelValuesWithMatchers(t *testing.T) { - head, _ := newTestHead(t, 1000, false, false) + head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) t.Cleanup(func() { require.NoError(t, head.Close()) }) app := head.Appender(context.Background()) @@ -2456,7 +2456,7 @@ func TestHeadLabelValuesWithMatchers(t 
*testing.T) { } func TestHeadLabelNamesWithMatchers(t *testing.T) { - head, _ := newTestHead(t, 1000, false, false) + head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, head.Close()) }() @@ -2524,7 +2524,7 @@ func TestHeadLabelNamesWithMatchers(t *testing.T) { } func TestErrReuseAppender(t *testing.T) { - head, _ := newTestHead(t, 1000, false, false) + head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, head.Close()) }() @@ -2560,7 +2560,7 @@ func TestErrReuseAppender(t *testing.T) { func TestHeadMintAfterTruncation(t *testing.T) { chunkRange := int64(2000) - head, _ := newTestHead(t, chunkRange, false, false) + head, _ := newTestHead(t, chunkRange, wlog.CompressionNone, false) app := head.Appender(context.Background()) _, err := app.Append(0, labels.FromStrings("a", "b"), 100, 100) @@ -2594,7 +2594,7 @@ func TestHeadMintAfterTruncation(t *testing.T) { func TestHeadExemplars(t *testing.T) { chunkRange := int64(2000) - head, _ := newTestHead(t, chunkRange, false, false) + head, _ := newTestHead(t, chunkRange, wlog.CompressionNone, false) app := head.Appender(context.Background()) l := labels.FromStrings("traceId", "123") @@ -2616,7 +2616,7 @@ func TestHeadExemplars(t *testing.T) { func BenchmarkHeadLabelValuesWithMatchers(b *testing.B) { chunkRange := int64(2000) - head, _ := newTestHead(b, chunkRange, false, false) + head, _ := newTestHead(b, chunkRange, wlog.CompressionNone, false) b.Cleanup(func() { require.NoError(b, head.Close()) }) app := head.Appender(context.Background()) @@ -2930,7 +2930,7 @@ func TestAppendHistogram(t *testing.T) { l := labels.FromStrings("a", "b") for _, numHistograms := range []int{1, 10, 150, 200, 250, 300} { t.Run(fmt.Sprintf("%d", numHistograms), func(t *testing.T) { - head, _ := newTestHead(t, 1000, false, false) + head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) t.Cleanup(func() { require.NoError(t, head.Close()) }) @@ -3034,7 +3034,7 
@@ func TestAppendHistogram(t *testing.T) { } func TestHistogramInWALAndMmapChunk(t *testing.T) { - head, _ := newTestHead(t, 3000, false, false) + head, _ := newTestHead(t, 3000, wlog.CompressionNone, false) t.Cleanup(func() { require.NoError(t, head.Close()) }) @@ -3188,7 +3188,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { // Restart head. require.NoError(t, head.Close()) startHead := func() { - w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false) + w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone) require.NoError(t, err) head, err = NewHead(nil, nil, w, nil, head.opts, nil) require.NoError(t, err) @@ -3217,7 +3217,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { } func TestChunkSnapshot(t *testing.T) { - head, _ := newTestHead(t, 120*4, false, false) + head, _ := newTestHead(t, 120*4, wlog.CompressionNone, false) defer func() { head.opts.EnableMemorySnapshotOnShutdown = false require.NoError(t, head.Close()) @@ -3310,7 +3310,7 @@ func TestChunkSnapshot(t *testing.T) { } openHeadAndCheckReplay := func() { - w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false) + w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone) require.NoError(t, err) head, err = NewHead(nil, nil, w, nil, head.opts, nil) require.NoError(t, err) @@ -3505,7 +3505,7 @@ func TestChunkSnapshot(t *testing.T) { } func TestSnapshotError(t *testing.T) { - head, _ := newTestHead(t, 120*4, false, false) + head, _ := newTestHead(t, 120*4, wlog.CompressionNone, false) defer func() { head.opts.EnableMemorySnapshotOnShutdown = false require.NoError(t, head.Close()) @@ -3562,7 +3562,7 @@ func TestSnapshotError(t *testing.T) { require.NoError(t, f.Close()) // Create new Head which should replay this snapshot. 
- w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false) + w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone) require.NoError(t, err) // Testing https://github.com/prometheus/prometheus/issues/9437 with the registry. head, err = NewHead(prometheus.NewRegistry(), nil, w, nil, head.opts, nil) @@ -3579,7 +3579,7 @@ func TestSnapshotError(t *testing.T) { func TestHistogramMetrics(t *testing.T) { numHistograms := 10 - head, _ := newTestHead(t, 1000, false, false) + head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) t.Cleanup(func() { require.NoError(t, head.Close()) }) @@ -3609,7 +3609,7 @@ func TestHistogramMetrics(t *testing.T) { require.Equal(t, float64(expHSamples), prom_testutil.ToFloat64(head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram))) require.NoError(t, head.Close()) - w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false) + w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone) require.NoError(t, err) head, err = NewHead(nil, nil, w, nil, head.opts, nil) require.NoError(t, err) @@ -3631,7 +3631,7 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) { t.Helper() l := labels.FromStrings("a", "b") numHistograms := 20 - head, _ := newTestHead(t, 100000, false, false) + head, _ := newTestHead(t, 100000, wlog.CompressionNone, false) t.Cleanup(func() { require.NoError(t, head.Close()) }) @@ -3778,7 +3778,7 @@ func TestHistogramCounterResetHeader(t *testing.T) { for _, floatHisto := range []bool{true, false} { t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) { l := labels.FromStrings("a", "b") - head, _ := newTestHead(t, 1000, false, false) + head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) t.Cleanup(func() { require.NoError(t, head.Close()) }) @@ -4041,7 +4041,7 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) { // Tests https://github.com/prometheus/prometheus/issues/9725. 
func TestChunkSnapshotReplayBug(t *testing.T) { dir := t.TempDir() - wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, true) + wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy) require.NoError(t, err) // Write few series records and samples such that the series references are not in order in the WAL @@ -4108,7 +4108,7 @@ func TestChunkSnapshotReplayBug(t *testing.T) { func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) { dir := t.TempDir() - wlTemp, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, true) + wlTemp, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy) require.NoError(t, err) // Write a snapshot with .tmp suffix. This used to fail taking any further snapshots or replay of snapshots. @@ -4146,9 +4146,9 @@ func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) { // TODO(codesome): Needs test for ooo WAL repair. func TestOOOWalReplay(t *testing.T) { dir := t.TempDir() - wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, true) + wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy) require.NoError(t, err) - oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, true) + oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, wlog.CompressionSnappy) require.NoError(t, err) opts := DefaultHeadOptions() @@ -4193,9 +4193,9 @@ func TestOOOWalReplay(t *testing.T) { // Restart head. 
require.NoError(t, h.Close()) - wal, err = wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, true) + wal, err = wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy) require.NoError(t, err) - oooWlog, err = wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, true) + oooWlog, err = wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, wlog.CompressionSnappy) require.NoError(t, err) h, err = NewHead(nil, nil, wal, oooWlog, opts, nil) require.NoError(t, err) @@ -4230,9 +4230,9 @@ func TestOOOWalReplay(t *testing.T) { // TestOOOMmapReplay checks the replay at a low level. func TestOOOMmapReplay(t *testing.T) { dir := t.TempDir() - wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, true) + wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy) require.NoError(t, err) - oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, true) + oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, wlog.CompressionSnappy) require.NoError(t, err) opts := DefaultHeadOptions() @@ -4281,9 +4281,9 @@ func TestOOOMmapReplay(t *testing.T) { // Restart head. 
require.NoError(t, h.Close()) - wal, err = wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, true) + wal, err = wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy) require.NoError(t, err) - oooWlog, err = wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, true) + oooWlog, err = wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, wlog.CompressionSnappy) require.NoError(t, err) h, err = NewHead(nil, nil, wal, oooWlog, opts, nil) require.NoError(t, err) @@ -4312,7 +4312,7 @@ func TestOOOMmapReplay(t *testing.T) { } func TestHeadInit_DiscardChunksWithUnsupportedEncoding(t *testing.T) { - h, _ := newTestHead(t, 1000, false, false) + h, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, h.Close()) }() @@ -4355,7 +4355,7 @@ func TestHeadInit_DiscardChunksWithUnsupportedEncoding(t *testing.T) { require.NoError(t, h.Close()) - wal, err := wlog.NewSize(nil, nil, filepath.Join(h.opts.ChunkDirRoot, "wal"), 32768, false) + wal, err := wlog.NewSize(nil, nil, filepath.Join(h.opts.ChunkDirRoot, "wal"), 32768, wlog.CompressionNone) require.NoError(t, err) h, err = NewHead(nil, nil, wal, nil, h.opts, nil) require.NoError(t, err) @@ -4390,7 +4390,7 @@ func (c *unsupportedChunk) Encoding() chunkenc.Encoding { // Tests https://github.com/prometheus/prometheus/issues/10277. 
func TestMmapPanicAfterMmapReplayCorruption(t *testing.T) { dir := t.TempDir() - wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, false) + wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionNone) require.NoError(t, err) opts := DefaultHeadOptions() @@ -4423,7 +4423,7 @@ func TestMmapPanicAfterMmapReplayCorruption(t *testing.T) { addChunks() require.NoError(t, h.Close()) - wal, err = wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, false) + wal, err = wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionNone) require.NoError(t, err) mmapFilePath := filepath.Join(dir, "chunks_head", "000001") @@ -4449,7 +4449,7 @@ func TestReplayAfterMmapReplayError(t *testing.T) { var err error openHead := func() { - wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, false) + wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionNone) require.NoError(t, err) opts := DefaultHeadOptions() @@ -4667,9 +4667,9 @@ func generateBigTestHistograms(n int) []*histogram.Histogram { func TestOOOAppendWithNoSeries(t *testing.T) { dir := t.TempDir() - wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, true) + wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy) require.NoError(t, err) - oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, true) + oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, wlog.CompressionSnappy) require.NoError(t, err) opts := DefaultHeadOptions() @@ -4748,9 +4748,9 @@ func TestOOOAppendWithNoSeries(t *testing.T) { func TestHeadMinOOOTimeUpdate(t *testing.T) { dir := t.TempDir() - wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, true) + wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy) require.NoError(t, err) - oooWlog, err := wlog.NewSize(nil, nil, 
filepath.Join(dir, wlog.WblDirName), 32768, true) + oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, wlog.CompressionSnappy) require.NoError(t, err) opts := DefaultHeadOptions() @@ -4795,7 +4795,7 @@ func TestHeadMinOOOTimeUpdate(t *testing.T) { func TestGaugeHistogramWALAndChunkHeader(t *testing.T) { l := labels.FromStrings("a", "b") - head, _ := newTestHead(t, 1000, false, false) + head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) t.Cleanup(func() { require.NoError(t, head.Close()) }) @@ -4859,7 +4859,7 @@ func TestGaugeHistogramWALAndChunkHeader(t *testing.T) { require.NoError(t, head.Close()) require.NoError(t, os.RemoveAll(mmappedChunksDir(head.opts.ChunkDirRoot))) - w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false) + w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone) require.NoError(t, err) head, err = NewHead(nil, nil, w, nil, head.opts, nil) require.NoError(t, err) @@ -4870,7 +4870,7 @@ func TestGaugeHistogramWALAndChunkHeader(t *testing.T) { func TestGaugeFloatHistogramWALAndChunkHeader(t *testing.T) { l := labels.FromStrings("a", "b") - head, _ := newTestHead(t, 1000, false, false) + head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) t.Cleanup(func() { require.NoError(t, head.Close()) }) @@ -4934,7 +4934,7 @@ func TestGaugeFloatHistogramWALAndChunkHeader(t *testing.T) { require.NoError(t, head.Close()) require.NoError(t, os.RemoveAll(mmappedChunksDir(head.opts.ChunkDirRoot))) - w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false) + w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone) require.NoError(t, err) head, err = NewHead(nil, nil, w, nil, head.opts, nil) require.NoError(t, err) @@ -4944,7 +4944,7 @@ func TestGaugeFloatHistogramWALAndChunkHeader(t *testing.T) { } func TestSnapshotAheadOfWALError(t *testing.T) { - head, _ := newTestHead(t, 120*4, false, false) + head, _ := newTestHead(t, 120*4, wlog.CompressionNone, false) 
head.opts.EnableMemorySnapshotOnShutdown = true // Add a sample to fill WAL. app := head.Appender(context.Background()) @@ -4967,7 +4967,7 @@ func TestSnapshotAheadOfWALError(t *testing.T) { // to keep using the same snapshot directory instead of a random one. require.NoError(t, os.RemoveAll(head.wal.Dir())) head.opts.EnableMemorySnapshotOnShutdown = false - w, _ := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false) + w, _ := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone) head, err = NewHead(nil, nil, w, nil, head.opts, nil) require.NoError(t, err) // Add a sample to fill WAL. @@ -4986,7 +4986,7 @@ func TestSnapshotAheadOfWALError(t *testing.T) { // Create new Head which should detect the incorrect index and delete the snapshot. head.opts.EnableMemorySnapshotOnShutdown = true - w, _ = wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false) + w, _ = wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone) head, err = NewHead(nil, nil, w, nil, head.opts, nil) require.NoError(t, err) require.NoError(t, head.Init(math.MinInt64)) diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 2fe33befba..2397a9ec97 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -1119,7 +1119,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { if err := os.MkdirAll(cpdirtmp, 0o777); err != nil { return stats, errors.Wrap(err, "create chunk snapshot dir") } - cp, err := wlog.New(nil, nil, cpdirtmp, h.wal.CompressionEnabled()) + cp, err := wlog.New(nil, nil, cpdirtmp, h.wal.CompressionType()) if err != nil { return stats, errors.Wrap(err, "open chunk snapshot") } diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index ed9ca27690..6c0038f897 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -29,6 +29,7 @@ import ( "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/tsdbutil" + 
"github.com/prometheus/prometheus/tsdb/wlog" ) type chunkInterval struct { @@ -295,7 +296,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { for perm, intervals := range permutations { for _, headChunk := range []bool{false, true} { t.Run(fmt.Sprintf("name=%s, permutation=%d, headChunk=%t", tc.name, perm, headChunk), func(t *testing.T) { - h, _ := newTestHead(t, 1000, false, true) + h, _ := newTestHead(t, 1000, wlog.CompressionNone, true) defer func() { require.NoError(t, h.Close()) }() @@ -375,7 +376,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { func TestOOOHeadChunkReader_LabelValues(t *testing.T) { chunkRange := int64(2000) - head, _ := newTestHead(t, chunkRange, false, true) + head, _ := newTestHead(t, chunkRange, wlog.CompressionNone, true) t.Cleanup(func() { require.NoError(t, head.Close()) }) app := head.Appender(context.Background()) diff --git a/tsdb/wal.go b/tsdb/wal.go index 70378021ae..3a410fb636 100644 --- a/tsdb/wal.go +++ b/tsdb/wal.go @@ -1226,7 +1226,7 @@ func MigrateWAL(logger log.Logger, dir string) (err error) { if err := os.RemoveAll(tmpdir); err != nil { return errors.Wrap(err, "cleanup replacement dir") } - repl, err := wlog.New(logger, nil, tmpdir, false) + repl, err := wlog.New(logger, nil, tmpdir, wlog.CompressionNone) if err != nil { return errors.Wrap(err, "open new WAL") } diff --git a/tsdb/wal_test.go b/tsdb/wal_test.go index da242b8750..5b2911131a 100644 --- a/tsdb/wal_test.go +++ b/tsdb/wal_test.go @@ -450,7 +450,7 @@ func TestMigrateWAL_Empty(t *testing.T) { wdir := path.Join(dir, "wal") // Initialize empty WAL. - w, err := wlog.New(nil, nil, wdir, false) + w, err := wlog.New(nil, nil, wdir, wlog.CompressionNone) require.NoError(t, err) require.NoError(t, w.Close()) @@ -493,7 +493,7 @@ func TestMigrateWAL_Fuzz(t *testing.T) { // Perform migration. 
require.NoError(t, MigrateWAL(nil, wdir)) - w, err := wlog.New(nil, nil, wdir, false) + w, err := wlog.New(nil, nil, wdir, wlog.CompressionNone) require.NoError(t, err) // We can properly write some new data after migration. diff --git a/tsdb/wlog/checkpoint.go b/tsdb/wlog/checkpoint.go index fe9952a303..dd52ea2e3d 100644 --- a/tsdb/wlog/checkpoint.go +++ b/tsdb/wlog/checkpoint.go @@ -134,7 +134,7 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head if err := os.MkdirAll(cpdirtmp, 0o777); err != nil { return nil, errors.Wrap(err, "create checkpoint dir") } - cp, err := New(nil, nil, cpdirtmp, w.CompressionEnabled()) + cp, err := New(nil, nil, cpdirtmp, w.CompressionType()) if err != nil { return nil, errors.Wrap(err, "open checkpoint") } diff --git a/tsdb/wlog/checkpoint_test.go b/tsdb/wlog/checkpoint_test.go index 22f577efde..704a65cc15 100644 --- a/tsdb/wlog/checkpoint_test.go +++ b/tsdb/wlog/checkpoint_test.go @@ -126,8 +126,8 @@ func TestCheckpoint(t *testing.T) { } } - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { dir := t.TempDir() var enc record.Encoder @@ -303,7 +303,7 @@ func TestCheckpoint(t *testing.T) { func TestCheckpointNoTmpFolderAfterError(t *testing.T) { // Create a new wlog with invalid data. 
dir := t.TempDir() - w, err := NewSize(nil, nil, dir, 64*1024, false) + w, err := NewSize(nil, nil, dir, 64*1024, CompressionNone) require.NoError(t, err) var enc record.Encoder require.NoError(t, w.Log(enc.Series([]record.RefSeries{ diff --git a/tsdb/wlog/live_reader.go b/tsdb/wlog/live_reader.go index 0ca69093a5..c69017051b 100644 --- a/tsdb/wlog/live_reader.go +++ b/tsdb/wlog/live_reader.go @@ -23,6 +23,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/golang/snappy" + "github.com/klauspost/compress/zstd" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" ) @@ -51,10 +52,14 @@ func NewLiveReaderMetrics(reg prometheus.Registerer) *LiveReaderMetrics { // NewLiveReader returns a new live reader. func NewLiveReader(logger log.Logger, metrics *LiveReaderMetrics, r io.Reader) *LiveReader { + // Calling zstd.NewReader with a nil io.Reader and no options cannot return an error. + zstdReader, _ := zstd.NewReader(nil) + lr := &LiveReader{ - logger: logger, - rdr: r, - metrics: metrics, + logger: logger, + rdr: r, + zstdReader: zstdReader, + metrics: metrics, // Until we understand how they come about, make readers permissive // to records spanning pages. @@ -68,17 +73,18 @@ func NewLiveReader(logger log.Logger, metrics *LiveReaderMetrics, r io.Reader) * // that are still in the process of being written, and returns records as soon // as they can be read. type LiveReader struct { - logger log.Logger - rdr io.Reader - err error - rec []byte - snappyBuf []byte - hdr [recordHeaderSize]byte - buf [pageSize]byte - readIndex int // Index in buf to start at for next read. - writeIndex int // Index in buf to start at for next write. - total int64 // Total bytes processed during reading in calls to Next(). - index int // Used to track partial records, should be 0 at the start of every new record. 
+ logger log.Logger + rdr io.Reader + err error + rec []byte + compressBuf []byte + zstdReader *zstd.Decoder + hdr [recordHeaderSize]byte + buf [pageSize]byte + readIndex int // Index in buf to start at for next read. + writeIndex int // Index in buf to start at for next write. + total int64 // Total bytes processed during reading in calls to Next(). + index int // Used to track partial records, should be 0 at the start of every new record. // For testing, we can treat EOF as a non-error. eofNonErr bool @@ -191,12 +197,14 @@ func (r *LiveReader) buildRecord() (bool, error) { rt := recTypeFromHeader(r.hdr[0]) if rt == recFirst || rt == recFull { r.rec = r.rec[:0] - r.snappyBuf = r.snappyBuf[:0] + r.compressBuf = r.compressBuf[:0] } - compressed := r.hdr[0]&snappyMask != 0 - if compressed { - r.snappyBuf = append(r.snappyBuf, temp...) + isSnappyCompressed := r.hdr[0]&snappyMask == snappyMask + isZstdCompressed := r.hdr[0]&zstdMask == zstdMask + + if isSnappyCompressed || isZstdCompressed { + r.compressBuf = append(r.compressBuf, temp...) } else { r.rec = append(r.rec, temp...) } @@ -207,12 +215,17 @@ func (r *LiveReader) buildRecord() (bool, error) { } if rt == recLast || rt == recFull { r.index = 0 - if compressed && len(r.snappyBuf) > 0 { + if isSnappyCompressed && len(r.compressBuf) > 0 { // The snappy library uses `len` to calculate if we need a new buffer. // In order to allocate as few buffers as possible make the length // equal to the capacity. 
r.rec = r.rec[:cap(r.rec)] - r.rec, err = snappy.Decode(r.rec, r.snappyBuf) + r.rec, err = snappy.Decode(r.rec, r.compressBuf) + if err != nil { + return false, err + } + } else if isZstdCompressed && len(r.compressBuf) > 0 { + r.rec, err = r.zstdReader.DecodeAll(r.compressBuf, r.rec[:0]) if err != nil { return false, err } diff --git a/tsdb/wlog/reader.go b/tsdb/wlog/reader.go index cba2167646..f77b03b8ea 100644 --- a/tsdb/wlog/reader.go +++ b/tsdb/wlog/reader.go @@ -20,23 +20,27 @@ import ( "io" "github.com/golang/snappy" + "github.com/klauspost/compress/zstd" "github.com/pkg/errors" ) // Reader reads WAL records from an io.Reader. type Reader struct { - rdr io.Reader - err error - rec []byte - snappyBuf []byte - buf [pageSize]byte - total int64 // Total bytes processed. - curRecTyp recType // Used for checking that the last record is not torn. + rdr io.Reader + err error + rec []byte + compressBuf []byte + zstdReader *zstd.Decoder + buf [pageSize]byte + total int64 // Total bytes processed. + curRecTyp recType // Used for checking that the last record is not torn. } // NewReader returns a new reader. func NewReader(r io.Reader) *Reader { - return &Reader{rdr: r} + // Calling zstd.NewReader with a nil io.Reader and no options cannot return an error. + zstdReader, _ := zstd.NewReader(nil) + return &Reader{rdr: r, zstdReader: zstdReader} } // Next advances the reader to the next records and returns true if it exists. @@ -63,7 +67,7 @@ func (r *Reader) next() (err error) { buf := r.buf[recordHeaderSize:] r.rec = r.rec[:0] - r.snappyBuf = r.snappyBuf[:0] + r.compressBuf = r.compressBuf[:0] i := 0 for { @@ -72,7 +76,8 @@ func (r *Reader) next() (err error) { } r.total++ r.curRecTyp = recTypeFromHeader(hdr[0]) - compressed := hdr[0]&snappyMask != 0 + isSnappyCompressed := hdr[0]&snappyMask == snappyMask + isZstdCompressed := hdr[0]&zstdMask == zstdMask // Gobble up zero bytes. 
if r.curRecTyp == recPageTerm { @@ -128,8 +133,8 @@ func (r *Reader) next() (err error) { return errors.Errorf("unexpected checksum %x, expected %x", c, crc) } - if compressed { - r.snappyBuf = append(r.snappyBuf, buf[:length]...) + if isSnappyCompressed || isZstdCompressed { + r.compressBuf = append(r.compressBuf, buf[:length]...) } else { r.rec = append(r.rec, buf[:length]...) } @@ -138,12 +143,15 @@ func (r *Reader) next() (err error) { return err } if r.curRecTyp == recLast || r.curRecTyp == recFull { - if compressed && len(r.snappyBuf) > 0 { + if isSnappyCompressed && len(r.compressBuf) > 0 { // The snappy library uses `len` to calculate if we need a new buffer. // In order to allocate as few buffers as possible make the length // equal to the capacity. r.rec = r.rec[:cap(r.rec)] - r.rec, err = snappy.Decode(r.rec, r.snappyBuf) + r.rec, err = snappy.Decode(r.rec, r.compressBuf) + return err + } else if isZstdCompressed && len(r.compressBuf) > 0 { + r.rec, err = r.zstdReader.DecodeAll(r.compressBuf, r.rec[:0]) return err } return nil diff --git a/tsdb/wlog/reader_test.go b/tsdb/wlog/reader_test.go index 737520e76a..2c4dd622c0 100644 --- a/tsdb/wlog/reader_test.go +++ b/tsdb/wlog/reader_test.go @@ -310,8 +310,8 @@ func allSegments(dir string) (io.ReadCloser, error) { func TestReaderFuzz(t *testing.T) { for name, fn := range readerConstructors { - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("%s,compress=%t", name, compress), func(t *testing.T) { + for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { + t.Run(fmt.Sprintf("%s,compress=%s", name, compress), func(t *testing.T) { dir := t.TempDir() w, err := NewSize(nil, nil, dir, 128*pageSize, compress) @@ -349,8 +349,8 @@ func TestReaderFuzz(t *testing.T) { func TestReaderFuzz_Live(t *testing.T) { logger := testutil.NewLogger(t) - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + for 
_, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { dir := t.TempDir() w, err := NewSize(nil, nil, dir, 128*pageSize, compress) @@ -439,7 +439,7 @@ func TestLiveReaderCorrupt_ShortFile(t *testing.T) { logger := testutil.NewLogger(t) dir := t.TempDir() - w, err := NewSize(nil, nil, dir, pageSize, false) + w, err := NewSize(nil, nil, dir, pageSize, CompressionNone) require.NoError(t, err) rec := make([]byte, pageSize-recordHeaderSize) @@ -479,7 +479,7 @@ func TestLiveReaderCorrupt_RecordTooLongAndShort(t *testing.T) { logger := testutil.NewLogger(t) dir := t.TempDir() - w, err := NewSize(nil, nil, dir, pageSize*2, false) + w, err := NewSize(nil, nil, dir, pageSize*2, CompressionNone) require.NoError(t, err) rec := make([]byte, pageSize-recordHeaderSize) @@ -526,7 +526,7 @@ func TestReaderData(t *testing.T) { for name, fn := range readerConstructors { t.Run(name, func(t *testing.T) { - w, err := New(nil, nil, dir, true) + w, err := New(nil, nil, dir, CompressionSnappy) require.NoError(t, err) sr, err := allSegments(dir) diff --git a/tsdb/wlog/watcher_test.go b/tsdb/wlog/watcher_test.go index 94b6a92d12..bc6a10126e 100644 --- a/tsdb/wlog/watcher_test.go +++ b/tsdb/wlog/watcher_test.go @@ -122,8 +122,8 @@ func TestTailSamples(t *testing.T) { const samplesCount = 250 const exemplarsCount = 25 const histogramsCount = 50 - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { now := time.Now() dir := t.TempDir() @@ -246,8 +246,8 @@ func TestReadToEndNoCheckpoint(t *testing.T) { const seriesCount = 10 const samplesCount = 250 - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + 
for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") err := os.Mkdir(wdir, 0o777) @@ -314,8 +314,8 @@ func TestReadToEndWithCheckpoint(t *testing.T) { const seriesCount = 10 const samplesCount = 250 - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") @@ -402,8 +402,8 @@ func TestReadCheckpoint(t *testing.T) { const seriesCount = 10 const samplesCount = 250 - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") @@ -475,8 +475,8 @@ func TestReadCheckpointMultipleSegments(t *testing.T) { const seriesCount = 20 const samplesCount = 300 - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") @@ -546,15 +546,15 @@ func TestCheckpointSeriesReset(t *testing.T) { const seriesCount = 20 const samplesCount = 350 testCases := []struct { - compress bool + compress CompressionType segments int }{ - {compress: false, segments: 14}, - {compress: true, segments: 13}, + {compress: CompressionNone, segments: 14}, + {compress: CompressionSnappy, segments: 13}, } for _, tc := range testCases { - 
t.Run(fmt.Sprintf("compress=%t", tc.compress), func(t *testing.T) { + t.Run(fmt.Sprintf("compress=%s", tc.compress), func(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") diff --git a/tsdb/wlog/wlog.go b/tsdb/wlog/wlog.go index b7b1623f90..d898ebd7ac 100644 --- a/tsdb/wlog/wlog.go +++ b/tsdb/wlog/wlog.go @@ -29,6 +29,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/golang/snappy" + "github.com/klauspost/compress/zstd" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "golang.org/x/exp/slices" @@ -164,6 +165,26 @@ func OpenReadSegment(fn string) (*Segment, error) { return &Segment{SegmentFile: f, i: k, dir: filepath.Dir(fn)}, nil } +type CompressionType string + +const ( + CompressionNone CompressionType = "none" + CompressionSnappy CompressionType = "snappy" + CompressionZstd CompressionType = "zstd" +) + +// ParseCompressionType parses the two compression-related configuration values and returns the CompressionType. If +// compression is enabled but the compressType is unrecognized, we default to Snappy compression. +func ParseCompressionType(compress bool, compressType string) CompressionType { + if compress { + if compressType == "zstd" { + return CompressionZstd + } + return CompressionSnappy + } + return CompressionNone +} + // WL is a write log that stores records in segment files. // It must be read from start to end once before logging new data. // If an error occurs during read, the repair procedure must be called @@ -185,8 +206,9 @@ type WL struct { stopc chan chan struct{} actorc chan func() closed bool // To allow calling Close() more than once without blocking. - compress bool - snappyBuf []byte + compress CompressionType + compressBuf []byte + zstdWriter *zstd.Encoder WriteNotified WriteNotified @@ -265,13 +287,13 @@ func newWLMetrics(w *WL, r prometheus.Registerer) *wlMetrics { } // New returns a new WAL over the given directory. 
-func New(logger log.Logger, reg prometheus.Registerer, dir string, compress bool) (*WL, error) { +func New(logger log.Logger, reg prometheus.Registerer, dir string, compress CompressionType) (*WL, error) { return NewSize(logger, reg, dir, DefaultSegmentSize, compress) } // NewSize returns a new write log over the given directory. // New segments are created with the specified size. -func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress bool) (*WL, error) { +func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress CompressionType) (*WL, error) { if segmentSize%pageSize != 0 { return nil, errors.New("invalid segment size") } @@ -281,6 +303,16 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi if logger == nil { logger = log.NewNopLogger() } + + var zstdWriter *zstd.Encoder + if compress == CompressionZstd { + var err error + zstdWriter, err = zstd.NewWriter(nil) + if err != nil { + return nil, err + } + } + w := &WL{ dir: dir, logger: logger, @@ -289,6 +321,7 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi actorc: make(chan func(), 100), stopc: make(chan chan struct{}), compress: compress, + zstdWriter: zstdWriter, } prefix := "prometheus_tsdb_wal_" if filepath.Base(dir) == WblDirName { @@ -327,16 +360,22 @@ func Open(logger log.Logger, dir string) (*WL, error) { if logger == nil { logger = log.NewNopLogger() } + zstdWriter, err := zstd.NewWriter(nil) + if err != nil { + return nil, err + } + w := &WL{ - dir: dir, - logger: logger, + dir: dir, + logger: logger, + zstdWriter: zstdWriter, } return w, nil } -// CompressionEnabled returns if compression is enabled on this WAL. -func (w *WL) CompressionEnabled() bool { +// CompressionType returns if compression is enabled on this WAL. 
+func (w *WL) CompressionType() CompressionType { return w.compress } @@ -583,9 +622,10 @@ func (w *WL) flushPage(clear bool) error { } // First Byte of header format: -// [ 4 bits unallocated] [1 bit snappy compression flag] [ 3 bit record type ] +// [3 bits unallocated] [1 bit zstd compression flag] [1 bit snappy compression flag] [3 bit record type ] const ( snappyMask = 1 << 3 + zstdMask = 1 << 4 recTypeMask = snappyMask - 1 ) @@ -655,17 +695,23 @@ func (w *WL) log(rec []byte, final bool) error { // Compress the record before calculating if a new segment is needed. compressed := false - if w.compress && - len(rec) > 0 && + if w.compress == CompressionSnappy && len(rec) > 0 { // If MaxEncodedLen is less than 0 the record is too large to be compressed. - snappy.MaxEncodedLen(len(rec)) >= 0 { - // The snappy library uses `len` to calculate if we need a new buffer. - // In order to allocate as few buffers as possible make the length - // equal to the capacity. - w.snappyBuf = w.snappyBuf[:cap(w.snappyBuf)] - w.snappyBuf = snappy.Encode(w.snappyBuf, rec) - if len(w.snappyBuf) < len(rec) { - rec = w.snappyBuf + if len(rec) > 0 && snappy.MaxEncodedLen(len(rec)) >= 0 { + // The snappy library uses `len` to calculate if we need a new buffer. + // In order to allocate as few buffers as possible make the length + // equal to the capacity. 
+ w.compressBuf = w.compressBuf[:cap(w.compressBuf)] + w.compressBuf = snappy.Encode(w.compressBuf, rec) + if len(w.compressBuf) < len(rec) { + rec = w.compressBuf + compressed = true + } + } + } else if w.compress == CompressionZstd && len(rec) > 0 { + w.compressBuf = w.zstdWriter.EncodeAll(rec, w.compressBuf[:0]) + if len(w.compressBuf) < len(rec) { + rec = w.compressBuf compressed = true } } @@ -706,7 +752,11 @@ func (w *WL) log(rec []byte, final bool) error { typ = recMiddle } if compressed { - typ |= snappyMask + if w.compress == CompressionSnappy { + typ |= snappyMask + } else if w.compress == CompressionZstd { + typ |= zstdMask + } } buf[0] = byte(typ) diff --git a/tsdb/wlog/wlog_test.go b/tsdb/wlog/wlog_test.go index 3d208baa38..f9ce225b37 100644 --- a/tsdb/wlog/wlog_test.go +++ b/tsdb/wlog/wlog_test.go @@ -124,7 +124,7 @@ func TestWALRepair_ReadingError(t *testing.T) { // then corrupt a given record in a given segment. // As a result we want a repaired WAL with given intact records. segSize := 3 * pageSize - w, err := NewSize(nil, nil, dir, segSize, false) + w, err := NewSize(nil, nil, dir, segSize, CompressionNone) require.NoError(t, err) var records [][]byte @@ -149,7 +149,7 @@ func TestWALRepair_ReadingError(t *testing.T) { require.NoError(t, f.Close()) - w, err = NewSize(nil, nil, dir, segSize, false) + w, err = NewSize(nil, nil, dir, segSize, CompressionNone) require.NoError(t, err) defer w.Close() @@ -223,7 +223,7 @@ func TestCorruptAndCarryOn(t *testing.T) { // Produce a WAL with a two segments of 3 pages with 3 records each, // so when we truncate the file we're guaranteed to split a record. 
{ - w, err := NewSize(logger, nil, dir, segmentSize, false) + w, err := NewSize(logger, nil, dir, segmentSize, CompressionNone) require.NoError(t, err) for i := 0; i < 18; i++ { @@ -294,7 +294,7 @@ func TestCorruptAndCarryOn(t *testing.T) { err = sr.Close() require.NoError(t, err) - w, err := NewSize(logger, nil, dir, segmentSize, false) + w, err := NewSize(logger, nil, dir, segmentSize, CompressionNone) require.NoError(t, err) err = w.Repair(corruptionErr) @@ -337,7 +337,7 @@ func TestCorruptAndCarryOn(t *testing.T) { // TestClose ensures that calling Close more than once doesn't panic and doesn't block. func TestClose(t *testing.T) { dir := t.TempDir() - w, err := NewSize(nil, nil, dir, pageSize, false) + w, err := NewSize(nil, nil, dir, pageSize, CompressionNone) require.NoError(t, err) require.NoError(t, w.Close()) require.Error(t, w.Close()) @@ -350,7 +350,7 @@ func TestSegmentMetric(t *testing.T) { ) dir := t.TempDir() - w, err := NewSize(nil, nil, dir, segmentSize, false) + w, err := NewSize(nil, nil, dir, segmentSize, CompressionNone) require.NoError(t, err) initialSegment := client_testutil.ToFloat64(w.metrics.currentSegment) @@ -369,7 +369,7 @@ func TestSegmentMetric(t *testing.T) { } func TestCompression(t *testing.T) { - bootstrap := func(compressed bool) string { + bootstrap := func(compressed CompressionType) string { const ( segmentSize = pageSize recordSize = (pageSize / 2) - recordHeaderSize @@ -390,21 +390,27 @@ func TestCompression(t *testing.T) { return dirPath } - dirCompressed := bootstrap(true) + tmpDirs := make([]string, 0, 3) defer func() { - require.NoError(t, os.RemoveAll(dirCompressed)) - }() - dirUnCompressed := bootstrap(false) - defer func() { - require.NoError(t, os.RemoveAll(dirUnCompressed)) + for _, dir := range tmpDirs { + require.NoError(t, os.RemoveAll(dir)) + } }() - uncompressedSize, err := fileutil.DirSize(dirUnCompressed) - require.NoError(t, err) - compressedSize, err := fileutil.DirSize(dirCompressed) - require.NoError(t, 
err) + dirUnCompressed := bootstrap(CompressionNone) + tmpDirs = append(tmpDirs, dirUnCompressed) - require.Greater(t, float64(uncompressedSize)*0.75, float64(compressedSize), "Compressing zeroes should save at least 25%% space - uncompressedSize: %d, compressedSize: %d", uncompressedSize, compressedSize) + for _, compressionType := range []CompressionType{CompressionSnappy, CompressionZstd} { + dirCompressed := bootstrap(compressionType) + tmpDirs = append(tmpDirs, dirCompressed) + + uncompressedSize, err := fileutil.DirSize(dirUnCompressed) + require.NoError(t, err) + compressedSize, err := fileutil.DirSize(dirCompressed) + require.NoError(t, err) + + require.Greater(t, float64(uncompressedSize)*0.75, float64(compressedSize), "Compressing zeroes should save at least 25%% space - uncompressedSize: %d, compressedSize: %d", uncompressedSize, compressedSize) + } } func TestLogPartialWrite(t *testing.T) { @@ -438,7 +444,7 @@ func TestLogPartialWrite(t *testing.T) { t.Run(testName, func(t *testing.T) { dirPath := t.TempDir() - w, err := NewSize(nil, nil, dirPath, segmentSize, false) + w, err := NewSize(nil, nil, dirPath, segmentSize, CompressionNone) require.NoError(t, err) // Replace the underlying segment file with a mocked one that injects a failure. 
@@ -505,8 +511,8 @@ func (f *faultySegmentFile) Write(p []byte) (int, error) { } func BenchmarkWAL_LogBatched(b *testing.B) { - for _, compress := range []bool{true, false} { - b.Run(fmt.Sprintf("compress=%t", compress), func(b *testing.B) { + for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { + b.Run(fmt.Sprintf("compress=%s", compress), func(b *testing.B) { dir := b.TempDir() w, err := New(nil, nil, dir, compress) @@ -535,8 +541,8 @@ func BenchmarkWAL_LogBatched(b *testing.B) { } func BenchmarkWAL_Log(b *testing.B) { - for _, compress := range []bool{true, false} { - b.Run(fmt.Sprintf("compress=%t", compress), func(b *testing.B) { + for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { + b.Run(fmt.Sprintf("compress=%s", compress), func(b *testing.B) { dir := b.TempDir() w, err := New(nil, nil, dir, compress) From a462f7fa2187edbb6ba2f286efe9fb36c122ed86 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Mon, 27 Mar 2023 14:54:29 -0700 Subject: [PATCH 25/40] Add function for iterating through all buckets in reverse to find max bucket Signed-off-by: Carrie Edwards --- model/histogram/float_histogram.go | 70 ++++++++ model/histogram/float_histogram_test.go | 226 ++++++++++++++++++++++++ 2 files changed, 296 insertions(+) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index f4ee13facc..978045696e 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -622,6 +622,20 @@ func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { } } +// AllReverseBucketIterator returns a BucketIterator to iterate over all negative, +// zero, and positive buckets in descending order (starting at the lowest bucket +// and going up). If the highest negative bucket or the lowest positive bucket +// overlap with the zero bucket, their upper or lower boundary, respectively, is +// set to the zero threshold. 
+func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] { + return &allReverseFloatBucketIterator{ + h: h, + negIter: h.NegativeBucketIterator(), + posIter: h.PositiveReverseBucketIterator(), + state: 1, + } +} + // zeroCountForLargerThreshold returns what the histogram's zero count would be // if the ZeroThreshold had the provided larger (or equal) value. If the // provided value is less than the histogram's ZeroThreshold, the method panics. @@ -957,3 +971,59 @@ func (i *allFloatBucketIterator) Next() bool { func (i *allFloatBucketIterator) At() Bucket[float64] { return i.currBucket } + +type allReverseFloatBucketIterator struct { + h *FloatHistogram + negIter, posIter BucketIterator[float64] + // 1 means we are iterating positive buckets. + // 0 means it is time for the zero bucket. + // -1 means we are iterating negative buckets. + // Anything else means iteration is over. + state int8 + currBucket Bucket[float64] +} + +func (i *allReverseFloatBucketIterator) Next() bool { + switch i.state { + case 1: + if i.posIter.Next() { + i.currBucket = i.posIter.At() + if i.currBucket.Lower < i.h.ZeroThreshold { + i.currBucket.Lower = i.h.ZeroThreshold + } + return true + } + i.state = 0 + return i.Next() + case 0: + i.state = -1 + if i.h.ZeroCount > 0 { + i.currBucket = Bucket[float64]{ + Lower: -i.h.ZeroThreshold, + Upper: i.h.ZeroThreshold, + LowerInclusive: true, + UpperInclusive: true, + Count: i.h.ZeroCount, + // Index is irrelevant for the zero bucket. 
+ } + return true + } + return i.Next() + case -1: + if i.negIter.Next() { + i.currBucket = i.negIter.At() + if i.currBucket.Upper > -i.h.ZeroThreshold { + i.currBucket.Upper = -i.h.ZeroThreshold + } + return true + } + i.state = 42 + return false + } + + return false +} + +func (i *allReverseFloatBucketIterator) At() Bucket[float64] { + return i.currBucket +} diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index 242ef4c92c..ce749b7101 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -1979,3 +1979,229 @@ func TestAllFloatBucketIterator(t *testing.T) { }) } } + +func TestAllReverseFloatBucketIterator(t *testing.T) { + cases := []struct { + h FloatHistogram + // To determine the expected buckets. + includeNeg, includeZero, includePos bool + }{ + { + h: FloatHistogram{ + Count: 405, + ZeroCount: 102, + ZeroThreshold: 0.001, + Sum: 1008.4, + Schema: 1, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 3}, + {Offset: 3, Length: 0}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33}, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 0}, + {Offset: 3, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33}, + }, + includeNeg: true, + includeZero: true, + includePos: true, + }, + { + h: FloatHistogram{ + Count: 405, + ZeroCount: 102, + ZeroThreshold: 0.001, + Sum: 1008.4, + Schema: 1, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 0}, + {Offset: 3, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33}, + }, + includeNeg: true, + includeZero: true, + includePos: false, + }, + { + h: 
FloatHistogram{ + Count: 405, + ZeroCount: 102, + ZeroThreshold: 0.001, + Sum: 1008.4, + Schema: 1, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 3}, + {Offset: 3, Length: 0}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33}, + }, + includeNeg: false, + includeZero: true, + includePos: true, + }, + { + h: FloatHistogram{ + Count: 405, + ZeroCount: 102, + ZeroThreshold: 0.001, + Sum: 1008.4, + Schema: 1, + }, + includeNeg: false, + includeZero: true, + includePos: false, + }, + { + h: FloatHistogram{ + Count: 405, + ZeroCount: 0, + ZeroThreshold: 0.001, + Sum: 1008.4, + Schema: 1, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 3}, + {Offset: 3, Length: 0}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33}, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 0}, + {Offset: 3, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33}, + }, + includeNeg: true, + includeZero: false, + includePos: true, + }, + { + h: FloatHistogram{ + Count: 447, + ZeroCount: 42, + ZeroThreshold: 0.5, // Coinciding with bucket boundary. 
+ Sum: 1008.4, + Schema: 0, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 3}, + {Offset: 3, Length: 0}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33}, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 0}, + {Offset: 3, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33}, + }, + includeNeg: true, + includeZero: true, + includePos: true, + }, + { + h: FloatHistogram{ + Count: 447, + ZeroCount: 42, + ZeroThreshold: 0.6, // Within the bucket closest to zero. + Sum: 1008.4, + Schema: 0, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 3}, + {Offset: 3, Length: 0}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33}, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 0}, + {Offset: 3, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33}, + }, + includeNeg: true, + includeZero: true, + includePos: true, + }, + } + + for i, c := range cases { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + var expBuckets, actBuckets []Bucket[float64] + + if c.includePos { + it := c.h.PositiveReverseBucketIterator() + for it.Next() { + b := it.At() + if c.includeZero && b.Lower < c.h.ZeroThreshold { + b.Lower = c.h.ZeroThreshold + } + expBuckets = append(expBuckets, b) + } + } + if c.includeZero { + expBuckets = append(expBuckets, Bucket[float64]{ + Lower: -c.h.ZeroThreshold, + Upper: c.h.ZeroThreshold, + LowerInclusive: true, + UpperInclusive: true, + Count: c.h.ZeroCount, + }) + } + if c.includeNeg { + it := c.h.NegativeBucketIterator() + for it.Next() { + b 
:= it.At() + if c.includeZero && b.Upper > -c.h.ZeroThreshold { + b.Upper = -c.h.ZeroThreshold + } + expBuckets = append(expBuckets, b) + } + } + + it := c.h.AllReverseBucketIterator() + for it.Next() { + actBuckets = append(actBuckets, it.At()) + } + + require.Equal(t, expBuckets, actBuckets) + }) + } +} From bc0ee4a469978d0e12486aaab6ad3720c40384a2 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Mon, 27 Mar 2023 14:55:06 -0700 Subject: [PATCH 26/40] Implement native histogram min and max query functions Signed-off-by: Carrie Edwards --- promql/functions.go | 62 ++++++++++++++++++++++++++++++++++++++ promql/parser/functions.go | 10 ++++++ 2 files changed, 72 insertions(+) diff --git a/promql/functions.go b/promql/functions.go index 96bffab96d..179d8b23e5 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -996,6 +996,66 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod return enh.Out } +// === histogram_min(Vector parser.ValueTypeVector) Vector === +func funcHistogramMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + inVec := vals[0].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. + if sample.H == nil { + continue + } + + min := math.NaN() // initialize to NaN in case histogram is empty + + it := sample.H.AllBucketIterator() // AllBucketIterator starts at the lowest bucket in the native histogram + for it.Next() { + bucket := it.At() + // Find the lower limit of the lowest populated bucket + if bucket.Count > 0 { + min = bucket.Lower + break + } + } + + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + Point: Point{V: min}, + }) + } + return enh.Out +} + +// === histogram_max(Vector parser.ValueTypeVector) Vector === +func funcHistogramMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + inVec := vals[0].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. 
+ if sample.H == nil { + continue + } + + max := math.NaN() // initialize to NaN in case histogram is empty + + it := sample.H.AllReverseBucketIterator() // AllReverseBucketIterator starts at the highest bucket in the native histogram + for it.Next() { + bucket := it.At() + // Find the upper limit of the highest populated bucket + if bucket.Count > 0 { + max = bucket.Upper + break + } + } + + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + Point: Point{V: max}, + }) + } + return enh.Out +} + // === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { lower := vals[0].(Vector)[0].F @@ -1375,6 +1435,8 @@ var FunctionCalls = map[string]FunctionCall{ "floor": funcFloor, "histogram_count": funcHistogramCount, "histogram_fraction": funcHistogramFraction, + "histogram_max": funcHistogramMax, + "histogram_min": funcHistogramMin, "histogram_quantile": funcHistogramQuantile, "histogram_sum": funcHistogramSum, "holt_winters": funcHoltWinters, diff --git a/promql/parser/functions.go b/promql/parser/functions.go index 479c7f635d..7c337412f8 100644 --- a/promql/parser/functions.go +++ b/promql/parser/functions.go @@ -173,6 +173,16 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeVector}, ReturnType: ValueTypeVector, }, + "histogram_min": { + Name: "histogram_min", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "histogram_max": { + Name: "histogram_max", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, "histogram_fraction": { Name: "histogram_fraction", ArgTypes: []ValueType{ValueTypeScalar, ValueTypeScalar, ValueTypeVector}, From 2f9bc98b8afb389c5582fcf750bc1ebe09b5c5f4 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Mon, 27 Mar 2023 14:55:33 -0700 Subject: [PATCH 27/40] Add tests for min and max functions 
Signed-off-by: Carrie Edwards --- promql/engine_test.go | 206 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 206 insertions(+) diff --git a/promql/engine_test.go b/promql/engine_test.go index 5ffebc202d..b5cc28c213 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -3295,6 +3295,212 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { } } +func TestNativeHistogram_HistogramMinAndMax(t *testing.T) { + // TODO(carrieedwards): Integrate histograms into the PromQL testing framework + // and write more tests there. + cases := []struct { + text string + // Histogram to test. + h *histogram.Histogram + // Expected + expectedMin float64 + expectedMax float64 + }{ + { + text: "all negative buckets", + h: &histogram.Histogram{ + Count: 12, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. + Schema: 0, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{2, 1, -2, 3}, + }, + expectedMin: -16, + expectedMax: -0.5, + }, + { + text: "all positive buckets", + h: &histogram.Histogram{ + Count: 12, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, 1, -2, 3}, + }, + expectedMin: 0.5, + expectedMax: 16, + }, + { + text: "all negative buckets", + h: &histogram.Histogram{ + Count: 12, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. + Schema: 0, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{2, 1, -2, 3}, + }, + expectedMin: -16, + expectedMax: -0.5, + }, + { + text: "both positive and negative buckets", + h: &histogram.Histogram{ + Count: 24, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. 
+ Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, 1, -2, 3}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{2, 1, -2, 3}, + }, + expectedMin: -16, + expectedMax: 16, + }, + { + text: "all positive buckets with zero bucket count", + h: &histogram.Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, 1, -2, 3}, + }, + expectedMin: -0.001, + expectedMax: 16, + }, + { + text: "all negative buckets with zero bucket count", + h: &histogram.Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. + Schema: 0, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{2, 1, -2, 3}, + }, + expectedMin: -16, + expectedMax: 0.001, + }, + { + text: "both positive and negative buckets with zero bucket count", + h: &histogram.Histogram{ + Count: 24, + ZeroCount: 4, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. 
+ Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, 1, -2, 3}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{2, 1, -2, 3}, + }, + expectedMin: -16, + expectedMax: 16, + }, + { + text: "empty histogram", + h: &histogram.Histogram{}, + expectedMin: math.NaN(), + expectedMax: math.NaN(), + }, + } + + test, err := NewTest(t, "") + require.NoError(t, err) + t.Cleanup(test.Close) + idx := int64(0) + for _, floatHisto := range []bool{true, false} { + for _, c := range cases { + t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) { + seriesName := "sparse_histogram_series" + lbls := labels.FromStrings("__name__", seriesName) + engine := test.QueryEngine() + + ts := idx * int64(10*time.Minute/time.Millisecond) + app := test.Storage().Appender(context.TODO()) + if floatHisto { + _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat()) + } else { + _, err = app.AppendHistogram(0, lbls, ts, c.h, nil) + } + require.NoError(t, err) + require.NoError(t, app.Commit()) + + queryString := fmt.Sprintf("histogram_min(%s)", seriesName) + qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res := qry.Exec(test.Context()) + require.NoError(t, res.Err) + + vector, err := res.Vector() + require.NoError(t, err) + + require.Len(t, vector, 1) + require.Nil(t, vector[0].H) + if math.IsNaN(c.expectedMin) { + require.True(t, math.IsNaN(vector[0].V)) + } else { + require.Equal(t, float64(c.expectedMin), vector[0].V) + } + + queryString = fmt.Sprintf("histogram_max(%s)", seriesName) + qry, err = engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res = qry.Exec(test.Context()) + require.NoError(t, res.Err) + + vector, err = res.Vector() + require.NoError(t, err) + + require.Len(t, 
vector, 1) + require.Nil(t, vector[0].H) + if math.IsNaN(c.expectedMax) { + require.True(t, math.IsNaN(vector[0].V)) + } else { + require.Equal(t, c.expectedMax, vector[0].V) + } + idx++ + }) + } + } +} + func TestNativeHistogram_HistogramQuantile(t *testing.T) { // TODO(codesome): Integrate histograms into the PromQL testing framework // and write more tests there. From f93ac97867d1972a38ac1647794d024506bb6be5 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Mon, 27 Mar 2023 14:55:51 -0700 Subject: [PATCH 28/40] Update querying function docs Signed-off-by: Carrie Edwards --- docs/querying/functions.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index e1a0b4a769..57b7953b91 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -200,6 +200,28 @@ observed values (in this case corresponding to “average request duration”): / histogram_count(rate(http_request_duration_seconds[10m])) +## `histogram_min()` + +_This function only acts on native histograms, which are an experimental +feature. The behavior of this function may change in future versions of +Prometheus, including its removal from PromQL._ + +`histogram_min(v instant-vector)` returns the estimated minimum value stored in +a native histogram. This estimation is based on the lower boundary of the lowest +bucket that contains values in the native histogram. Samples that are not native +histograms are ignored and do not show up in the returned vector. + +## `histogram_max()` + +_This function only acts on native histograms, which are an experimental +feature. The behavior of this function may change in future versions of +Prometheus, including its removal from PromQL._ + +`histogram_max(v instant-vector)` returns the estimated maximum value stored in +a native histogram. This estimation is based on the upper boundary of the highest +bucket that contains values in the native histogram. 
Samples that are not native +histograms are ignored and do not show up in the returned vector. + ## `histogram_fraction()` _This function only acts on native histograms, which are an experimental From 42d9169ba1f6e3fa808efd9cc733313723f498d7 Mon Sep 17 00:00:00 2001 From: Ziqi Zhao Date: Wed, 5 Jul 2023 19:05:53 +0800 Subject: [PATCH 29/40] enhance histogram_quantile to get min/max value Signed-off-by: Ziqi Zhao --- docs/querying/functions.md | 28 +--- model/histogram/float_histogram.go | 96 ++++---------- promql/engine_test.go | 206 ----------------------------- promql/functions.go | 62 --------- promql/parser/functions.go | 10 -- promql/quantile.go | 26 +++- 6 files changed, 52 insertions(+), 376 deletions(-) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index 57b7953b91..55ed92ecc8 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -200,28 +200,6 @@ observed values (in this case corresponding to “average request duration”): / histogram_count(rate(http_request_duration_seconds[10m])) -## `histogram_min()` - -_This function only acts on native histograms, which are an experimental -feature. The behavior of this function may change in future versions of -Prometheus, including its removal from PromQL._ - -`histogram_min(v instant-vector)` returns the estimated minimum value stored in -a native histogram. This estimation is based on the lower boundary of the lowest -bucket that contains values in the native histogram. Samples that are not native -histograms are ignored and do not show up in the returned vector. - -## `histogram_max()` - -_This function only acts on native histograms, which are an experimental -feature. The behavior of this function may change in future versions of -Prometheus, including its removal from PromQL._ - -`histogram_max(v instant-vector)` returns the estimated maximum value stored in -a native histogram. 
This estimation is based on the upper boundary of the highest -bucket that contains values in the native histogram. Samples that are not native -histograms are ignored and do not show up in the returned vector. - ## `histogram_fraction()` _This function only acts on native histograms, which are an experimental @@ -339,6 +317,12 @@ bound of that bucket is greater than bucket. Otherwise, the upper bound of the lowest bucket is returned for quantiles located in the lowest bucket. +You can use `histogram_quantile(0, v instant-vector)` to get the estimated minimum value stored in +a histogram. + +You can use `histogram_quantile(1, v instant-vector)` to get the estimated maximum value stored in +a histogram. + ## `holt_winters()` diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 978045696e..2ced090167 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -615,10 +615,10 @@ func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] // set to the zero threshold. func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { return &allFloatBucketIterator{ - h: h, - negIter: h.NegativeReverseBucketIterator(), - posIter: h.PositiveBucketIterator(), - state: -1, + h: h, + leftIter: h.NegativeReverseBucketIterator(), + rightIter: h.PositiveBucketIterator(), + state: -1, } } @@ -628,11 +628,11 @@ func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { // overlap with the zero bucket, their upper or lower boundary, respectively, is // set to the zero threshold. 
func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] { - return &allReverseFloatBucketIterator{ - h: h, - negIter: h.NegativeBucketIterator(), - posIter: h.PositiveReverseBucketIterator(), - state: 1, + return &allFloatBucketIterator{ + h: h, + leftIter: h.PositiveReverseBucketIterator(), + rightIter: h.NegativeBucketIterator(), + state: -1, } } @@ -917,8 +917,8 @@ func (i *reverseFloatBucketIterator) Next() bool { } type allFloatBucketIterator struct { - h *FloatHistogram - negIter, posIter BucketIterator[float64] + h *FloatHistogram + leftIter, rightIter BucketIterator[float64] // -1 means we are iterating negative buckets. // 0 means it is time for the zero bucket. // 1 means we are iterating positive buckets. @@ -930,10 +930,13 @@ type allFloatBucketIterator struct { func (i *allFloatBucketIterator) Next() bool { switch i.state { case -1: - if i.negIter.Next() { - i.currBucket = i.negIter.At() - if i.currBucket.Upper > -i.h.ZeroThreshold { + if i.leftIter.Next() { + i.currBucket = i.leftIter.At() + switch { + case i.currBucket.Upper < 0 && i.currBucket.Upper > -i.h.ZeroThreshold: i.currBucket.Upper = -i.h.ZeroThreshold + case i.currBucket.Lower > 0 && i.currBucket.Lower < i.h.ZeroThreshold: + i.currBucket.Lower = i.h.ZeroThreshold } return true } @@ -954,10 +957,13 @@ func (i *allFloatBucketIterator) Next() bool { } return i.Next() case 1: - if i.posIter.Next() { - i.currBucket = i.posIter.At() - if i.currBucket.Lower < i.h.ZeroThreshold { + if i.rightIter.Next() { + i.currBucket = i.rightIter.At() + switch { + case i.currBucket.Lower > 0 && i.currBucket.Lower < i.h.ZeroThreshold: i.currBucket.Lower = i.h.ZeroThreshold + case i.currBucket.Upper < 0 && i.currBucket.Upper > -i.h.ZeroThreshold: + i.currBucket.Upper = -i.h.ZeroThreshold } return true } @@ -971,59 +977,3 @@ func (i *allFloatBucketIterator) Next() bool { func (i *allFloatBucketIterator) At() Bucket[float64] { return i.currBucket } - -type allReverseFloatBucketIterator struct { 
- h *FloatHistogram - negIter, posIter BucketIterator[float64] - // 1 means we are iterating positive buckets. - // 0 means it is time for the zero bucket. - // -1 means we are iterating negative buckets. - // Anything else means iteration is over. - state int8 - currBucket Bucket[float64] -} - -func (i *allReverseFloatBucketIterator) Next() bool { - switch i.state { - case 1: - if i.posIter.Next() { - i.currBucket = i.posIter.At() - if i.currBucket.Lower < i.h.ZeroThreshold { - i.currBucket.Lower = i.h.ZeroThreshold - } - return true - } - i.state = 0 - return i.Next() - case 0: - i.state = -1 - if i.h.ZeroCount > 0 { - i.currBucket = Bucket[float64]{ - Lower: -i.h.ZeroThreshold, - Upper: i.h.ZeroThreshold, - LowerInclusive: true, - UpperInclusive: true, - Count: i.h.ZeroCount, - // Index is irrelevant for the zero bucket. - } - return true - } - return i.Next() - case -1: - if i.negIter.Next() { - i.currBucket = i.negIter.At() - if i.currBucket.Upper > -i.h.ZeroThreshold { - i.currBucket.Upper = -i.h.ZeroThreshold - } - return true - } - i.state = 42 - return false - } - - return false -} - -func (i *allReverseFloatBucketIterator) At() Bucket[float64] { - return i.currBucket -} diff --git a/promql/engine_test.go b/promql/engine_test.go index b5cc28c213..5ffebc202d 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -3295,212 +3295,6 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { } } -func TestNativeHistogram_HistogramMinAndMax(t *testing.T) { - // TODO(carrieedwards): Integrate histograms into the PromQL testing framework - // and write more tests there. - cases := []struct { - text string - // Histogram to test. - h *histogram.Histogram - // Expected - expectedMin float64 - expectedMax float64 - }{ - { - text: "all negative buckets", - h: &histogram.Histogram{ - Count: 12, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. 
- Schema: 0, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{2, 1, -2, 3}, - }, - expectedMin: -16, - expectedMax: -0.5, - }, - { - text: "all positive buckets", - h: &histogram.Histogram{ - Count: 12, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. - Schema: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, 1, -2, 3}, - }, - expectedMin: 0.5, - expectedMax: 16, - }, - { - text: "all negative buckets", - h: &histogram.Histogram{ - Count: 12, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. - Schema: 0, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{2, 1, -2, 3}, - }, - expectedMin: -16, - expectedMax: -0.5, - }, - { - text: "both positive and negative buckets", - h: &histogram.Histogram{ - Count: 24, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. - Schema: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, 1, -2, 3}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{2, 1, -2, 3}, - }, - expectedMin: -16, - expectedMax: 16, - }, - { - text: "all positive buckets with zero bucket count", - h: &histogram.Histogram{ - Count: 12, - ZeroCount: 2, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. - Schema: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, 1, -2, 3}, - }, - expectedMin: -0.001, - expectedMax: 16, - }, - { - text: "all negative buckets with zero bucket count", - h: &histogram.Histogram{ - Count: 12, - ZeroCount: 2, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. 
- Schema: 0, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{2, 1, -2, 3}, - }, - expectedMin: -16, - expectedMax: 0.001, - }, - { - text: "both positive and negative buckets with zero bucket count", - h: &histogram.Histogram{ - Count: 24, - ZeroCount: 4, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. - Schema: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, 1, -2, 3}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{2, 1, -2, 3}, - }, - expectedMin: -16, - expectedMax: 16, - }, - { - text: "empty histogram", - h: &histogram.Histogram{}, - expectedMin: math.NaN(), - expectedMax: math.NaN(), - }, - } - - test, err := NewTest(t, "") - require.NoError(t, err) - t.Cleanup(test.Close) - idx := int64(0) - for _, floatHisto := range []bool{true, false} { - for _, c := range cases { - t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) { - seriesName := "sparse_histogram_series" - lbls := labels.FromStrings("__name__", seriesName) - engine := test.QueryEngine() - - ts := idx * int64(10*time.Minute/time.Millisecond) - app := test.Storage().Appender(context.TODO()) - if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat()) - } else { - _, err = app.AppendHistogram(0, lbls, ts, c.h, nil) - } - require.NoError(t, err) - require.NoError(t, app.Commit()) - - queryString := fmt.Sprintf("histogram_min(%s)", seriesName) - qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(test.Context()) - require.NoError(t, res.Err) - - vector, err := res.Vector() - require.NoError(t, err) - - require.Len(t, vector, 1) - require.Nil(t, vector[0].H) - if math.IsNaN(c.expectedMin) { - require.True(t, math.IsNaN(vector[0].V)) - } else { - 
require.Equal(t, float64(c.expectedMin), vector[0].V) - } - - queryString = fmt.Sprintf("histogram_max(%s)", seriesName) - qry, err = engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res = qry.Exec(test.Context()) - require.NoError(t, res.Err) - - vector, err = res.Vector() - require.NoError(t, err) - - require.Len(t, vector, 1) - require.Nil(t, vector[0].H) - if math.IsNaN(c.expectedMax) { - require.True(t, math.IsNaN(vector[0].V)) - } else { - require.Equal(t, c.expectedMax, vector[0].V) - } - idx++ - }) - } - } -} - func TestNativeHistogram_HistogramQuantile(t *testing.T) { // TODO(codesome): Integrate histograms into the PromQL testing framework // and write more tests there. diff --git a/promql/functions.go b/promql/functions.go index 179d8b23e5..96bffab96d 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -996,66 +996,6 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod return enh.Out } -// === histogram_min(Vector parser.ValueTypeVector) Vector === -func funcHistogramMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - inVec := vals[0].(Vector) - - for _, sample := range inVec { - // Skip non-histogram samples. 
- if sample.H == nil { - continue - } - - min := math.NaN() // initialize to NaN in case histogram is empty - - it := sample.H.AllBucketIterator() // AllBucketIterator starts at the lowest bucket in the native histogram - for it.Next() { - bucket := it.At() - // Find the lower limit of the lowest populated bucket - if bucket.Count > 0 { - min = bucket.Lower - break - } - } - - enh.Out = append(enh.Out, Sample{ - Metric: enh.DropMetricName(sample.Metric), - Point: Point{V: min}, - }) - } - return enh.Out -} - -// === histogram_max(Vector parser.ValueTypeVector) Vector === -func funcHistogramMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - inVec := vals[0].(Vector) - - for _, sample := range inVec { - // Skip non-histogram samples. - if sample.H == nil { - continue - } - - max := math.NaN() // initialize to NaN in case histogram is empty - - it := sample.H.AllReverseBucketIterator() // AllReverseBucketIterator starts at the highest bucket in the native histogram - for it.Next() { - bucket := it.At() - // Find the upper limit of the highest populated bucket - if bucket.Count > 0 { - max = bucket.Upper - break - } - } - - enh.Out = append(enh.Out, Sample{ - Metric: enh.DropMetricName(sample.Metric), - Point: Point{V: max}, - }) - } - return enh.Out -} - // === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { lower := vals[0].(Vector)[0].F @@ -1435,8 +1375,6 @@ var FunctionCalls = map[string]FunctionCall{ "floor": funcFloor, "histogram_count": funcHistogramCount, "histogram_fraction": funcHistogramFraction, - "histogram_max": funcHistogramMax, - "histogram_min": funcHistogramMin, "histogram_quantile": funcHistogramQuantile, "histogram_sum": funcHistogramSum, "holt_winters": funcHoltWinters, diff --git a/promql/parser/functions.go b/promql/parser/functions.go index 7c337412f8..479c7f635d 
100644 --- a/promql/parser/functions.go +++ b/promql/parser/functions.go @@ -173,16 +173,6 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeVector}, ReturnType: ValueTypeVector, }, - "histogram_min": { - Name: "histogram_min", - ArgTypes: []ValueType{ValueTypeVector}, - ReturnType: ValueTypeVector, - }, - "histogram_max": { - Name: "histogram_max", - ArgTypes: []ValueType{ValueTypeVector}, - ReturnType: ValueTypeVector, - }, "histogram_fraction": { Name: "histogram_fraction", ArgTypes: []ValueType{ValueTypeScalar, ValueTypeScalar, ValueTypeVector}, diff --git a/promql/quantile.go b/promql/quantile.go index d80345e817..793a6629f1 100644 --- a/promql/quantile.go +++ b/promql/quantile.go @@ -158,9 +158,21 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { var ( bucket histogram.Bucket[float64] count float64 - it = h.AllBucketIterator() - rank = q * h.Count + it histogram.BucketIterator[float64] + rank float64 ) + + // if there are NaN observations in the histogram (h.Sum is NaN), use the forward iterator + // if the q < 0.5, use the forward iterator + // if the q >= 0.5, use the reverse iterator + if math.IsNaN(h.Sum) || q < 0.5 { + it = h.AllBucketIterator() + rank = q * h.Count + } else { + it = h.AllReverseBucketIterator() + rank = (1 - q) * h.Count + } + for it.Next() { bucket = it.At() count += bucket.Count @@ -193,7 +205,15 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { return bucket.Upper } - rank -= count - bucket.Count + // if there are NaN observations in the histogram (h.Sum is NaN), use the forward iterator + // if the q < 0.5, use the forward iterator + // if the q >= 0.5, use the reverse iterator + if math.IsNaN(h.Sum) || q < 0.5 { + rank -= count - bucket.Count + } else { + rank = count - rank + } + // TODO(codesome): Use a better estimation than linear. 
return bucket.Lower + (bucket.Upper-bucket.Lower)*(rank/bucket.Count) } From 162612ea8627447be0b8fa170268b76524124bbc Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 12 Jul 2023 14:52:49 +0200 Subject: [PATCH 30/40] histograms: Improve comment Oversight during review of #12525. Signed-off-by: beorn7 --- promql/quantile.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/promql/quantile.go b/promql/quantile.go index 793a6629f1..7f48b5945c 100644 --- a/promql/quantile.go +++ b/promql/quantile.go @@ -205,9 +205,10 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { return bucket.Upper } - // if there are NaN observations in the histogram (h.Sum is NaN), use the forward iterator - // if the q < 0.5, use the forward iterator - // if the q >= 0.5, use the reverse iterator + // NaN observations increase h.Count but not the total number of + // observations in the buckets. Therefore, we have to use the forward + // iterator to find percentiles. We recognize histograms containing NaN + // observations by checking if their h.Sum is NaN. if math.IsNaN(h.Sum) || q < 0.5 { rank -= count - bucket.Count } else { From 1c3bd04beae91e7abae55903c814b67869cd7186 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 12 Jul 2023 18:17:35 +0200 Subject: [PATCH 31/40] histograms: Modify test to expose bug #12552 Signed-off-by: beorn7 --- model/textparse/protobufparse_test.go | 297 +++++++++++++++++++++++--- 1 file changed, 263 insertions(+), 34 deletions(-) diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go index 882cce59d3..88ad9f2213 100644 --- a/model/textparse/protobufparse_test.go +++ b/model/textparse/protobufparse_test.go @@ -408,6 +408,61 @@ metric: < > > +`, + `name: "test_histogram_family" +help: "Test histogram metric family with two very simple histograms." 
+type: HISTOGRAM +metric: < + label: < + name: "foo" + value: "bar" + > + histogram: < + sample_count: 5 + sample_sum: 12.1 + bucket: < + cumulative_count: 2 + upper_bound: 1.1 + > + bucket: < + cumulative_count: 3 + upper_bound: 2.2 + > + schema: 3 + positive_span: < + offset: 8 + length: 2 + > + positive_delta: 2 + positive_delta: 1 + > +> +metric: < + label: < + name: "foo" + value: "baz" + > + histogram: < + sample_count: 6 + sample_sum: 13.1 + bucket: < + cumulative_count: 1 + upper_bound: 1.1 + > + bucket: < + cumulative_count: 5 + upper_bound: 2.2 + > + schema: 3 + positive_span: < + offset: 8 + length: 2 + > + positive_delta: 1 + positive_delta: 4 + > +> + `, `name: "rpc_durations_seconds" help: "RPC latency distributions." @@ -751,6 +806,50 @@ func TestProtobufParse(t *testing.T) { "le", "+Inf", ), }, + { + m: "test_histogram_family", + help: "Test histogram metric family with two very simple histograms.", + }, + { + m: "test_histogram_family", + typ: MetricTypeHistogram, + }, + { + m: "test_histogram_family\xfffoo\xffbar", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 5, + Sum: 12.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{2, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "foo", "bar", + ), + }, + { + m: "test_histogram_family\xfffoo\xffbaz", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 6, + Sum: 13.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{1, 4}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "foo", "baz", + ), + }, { m: "rpc_durations_seconds", help: "RPC latency distributions.", @@ -1321,14 +1420,144 @@ func TestProtobufParse(t *testing.T) { ), }, { // 53 + m: "test_histogram_family", + help: "Test 
histogram metric family with two very simple histograms.", + }, + { // 54 + m: "test_histogram_family", + typ: MetricTypeHistogram, + }, + { // 55 + m: "test_histogram_family\xfffoo\xffbar", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 5, + Sum: 12.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{2, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "foo", "bar", + ), + }, + { // 56 + m: "test_histogram_family_count\xfffoo\xffbar", + v: 5, + lset: labels.FromStrings( + "__name__", "test_histogram_family_count", + "foo", "bar", + ), + }, + { // 57 + m: "test_histogram_family_sum\xfffoo\xffbar", + v: 12.1, + lset: labels.FromStrings( + "__name__", "test_histogram_family_sum", + "foo", "bar", + ), + }, + { // 58 + m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff1.1", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "bar", + "le", "1.1", + ), + }, + { // 59 + m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff2.2", + v: 3, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "bar", + "le", "2.2", + ), + }, + { // 60 + m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff+Inf", + v: 5, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "bar", + "le", "+Inf", + ), + }, + { // 61 + m: "test_histogram_family\xfffoo\xffbaz", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 6, + Sum: 13.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{1, 4}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "foo", "baz", + ), + }, + { // 62 + m: "test_histogram_family_count\xfffoo\xffbaz", + v: 6, + lset: labels.FromStrings( + "__name__", 
"test_histogram_family_count", + "foo", "baz", + ), + }, + { // 63 + m: "test_histogram_family_sum\xfffoo\xffbaz", + v: 13.1, + lset: labels.FromStrings( + "__name__", "test_histogram_family_sum", + "foo", "baz", + ), + }, + { // 64 + m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff1.1", + v: 1, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "baz", + "le", "1.1", + ), + }, + { // 65 + m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff2.2", + v: 5, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "baz", + "le", "2.2", + ), + }, + { // 66 + m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff+Inf", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "baz", + "le", "+Inf", + ), + }, + { // 67 m: "rpc_durations_seconds", help: "RPC latency distributions.", }, - { // 54 + { // 68 m: "rpc_durations_seconds", typ: MetricTypeSummary, }, - { // 55 + { // 69 m: "rpc_durations_seconds_count\xffservice\xffexponential", v: 262, lset: labels.FromStrings( @@ -1336,7 +1565,7 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 56 + { // 70 m: "rpc_durations_seconds_sum\xffservice\xffexponential", v: 0.00025551262820703587, lset: labels.FromStrings( @@ -1344,7 +1573,7 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 57 + { // 71 m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.5", v: 6.442786329648548e-07, lset: labels.FromStrings( @@ -1353,7 +1582,7 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 58 + { // 72 m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.9", v: 1.9435742936658396e-06, lset: labels.FromStrings( @@ -1362,7 +1591,7 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 59 + { // 73 m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.99", v: 4.0471608667037015e-06, 
lset: labels.FromStrings( @@ -1371,22 +1600,22 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 60 + { // 74 m: "without_quantiles", help: "A summary without quantiles.", }, - { // 61 + { // 75 m: "without_quantiles", typ: MetricTypeSummary, }, - { // 62 + { // 76 m: "without_quantiles_count", v: 42, lset: labels.FromStrings( "__name__", "without_quantiles_count", ), }, - { // 63 + { // 77 m: "without_quantiles_sum", v: 1.234, lset: labels.FromStrings( @@ -1420,61 +1649,61 @@ func TestProtobufParse(t *testing.T) { var e exemplar.Exemplar p.Metric(&res) found := p.Exemplar(&e) - require.Equal(t, exp[i].m, string(m)) + require.Equal(t, exp[i].m, string(m), "i: %d", i) if ts != nil { - require.Equal(t, exp[i].t, *ts) + require.Equal(t, exp[i].t, *ts, "i: %d", i) } else { - require.Equal(t, exp[i].t, int64(0)) + require.Equal(t, exp[i].t, int64(0), "i: %d", i) } - require.Equal(t, exp[i].v, v) - require.Equal(t, exp[i].lset, res) + require.Equal(t, exp[i].v, v, "i: %d", i) + require.Equal(t, exp[i].lset, res, "i: %d", i) if len(exp[i].e) == 0 { - require.Equal(t, false, found) + require.Equal(t, false, found, "i: %d", i) } else { - require.Equal(t, true, found) - require.Equal(t, exp[i].e[0], e) + require.Equal(t, true, found, "i: %d", i) + require.Equal(t, exp[i].e[0], e, "i: %d", i) } case EntryHistogram: m, ts, shs, fhs := p.Histogram() p.Metric(&res) - require.Equal(t, exp[i].m, string(m)) + require.Equal(t, exp[i].m, string(m), "i: %d", i) if ts != nil { - require.Equal(t, exp[i].t, *ts) + require.Equal(t, exp[i].t, *ts, "i: %d", i) } else { - require.Equal(t, exp[i].t, int64(0)) + require.Equal(t, exp[i].t, int64(0), "i: %d", i) } - require.Equal(t, exp[i].lset, res) - require.Equal(t, exp[i].m, string(m)) + require.Equal(t, exp[i].lset, res, "i: %d", i) + require.Equal(t, exp[i].m, string(m), "i: %d", i) if shs != nil { - require.Equal(t, exp[i].shs, shs) + require.Equal(t, exp[i].shs, shs, "i: %d", i) } else { - require.Equal(t, 
exp[i].fhs, fhs) + require.Equal(t, exp[i].fhs, fhs, "i: %d", i) } j := 0 for e := (exemplar.Exemplar{}); p.Exemplar(&e); j++ { - require.Equal(t, exp[i].e[j], e) + require.Equal(t, exp[i].e[j], e, "i: %d", i) e = exemplar.Exemplar{} } - require.Equal(t, len(exp[i].e), j, "not enough exemplars found") + require.Equal(t, len(exp[i].e), j, "not enough exemplars found, i: %d", i) case EntryType: m, typ := p.Type() - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].typ, typ) + require.Equal(t, exp[i].m, string(m), "i: %d", i) + require.Equal(t, exp[i].typ, typ, "i: %d", i) case EntryHelp: m, h := p.Help() - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].help, string(h)) + require.Equal(t, exp[i].m, string(m), "i: %d", i) + require.Equal(t, exp[i].help, string(h), "i: %d", i) case EntryUnit: m, u := p.Unit() - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].unit, string(u)) + require.Equal(t, exp[i].m, string(m), "i: %d", i) + require.Equal(t, exp[i].unit, string(u), "i: %d", i) case EntryComment: - require.Equal(t, exp[i].comment, string(p.Comment())) + require.Equal(t, exp[i].comment, string(p.Comment()), "i: %d", i) } i++ From da047c6857850e315543e73272312a368ceaffbe Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 12 Jul 2023 18:42:02 +0200 Subject: [PATCH 32/40] histograms: Fix bug #12552 The problem was the following: When trying to parse native histograms and classic histograms in parallel, the parser would first parse the histogram proto messages as a native histogram and then parse the same message again, but now as a classic histogram. Afterwards, it would forget that it was dealing with a metric family that contains native histograms and would parse the rest of the metric family as classic histograms only. The fix is to check again after being done with a classic histogram. 
Signed-off-by: beorn7 --- model/textparse/protobufparse.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go index b831251ad0..2ef52da5c4 100644 --- a/model/textparse/protobufparse.go +++ b/model/textparse/protobufparse.go @@ -54,7 +54,7 @@ type ProtobufParser struct { // quantiles/buckets. fieldPos int fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed. - redoClassic bool // true after parsing a native histogram if we need to parse it again as a classit histogram. + redoClassic bool // true after parsing a native histogram if we need to parse it again as a classic histogram. // state is marked by the entry we are processing. EntryInvalid implies // that we have to decode the next MetricFamily. @@ -411,6 +411,14 @@ func (p *ProtobufParser) Next() (Entry, error) { p.metricPos++ p.fieldPos = -2 p.fieldsDone = false + // If this is a metric family containing native + // histograms, we have to switch back to native + // histograms after parsing a classic histogram. 
+ if p.state == EntrySeries && + (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) && + isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) { + p.state = EntryHistogram + } } if p.metricPos >= len(p.mf.GetMetric()) { p.state = EntryInvalid From e1ace8d00e09e3a791b2814b635d917116e27914 Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Wed, 12 Jul 2023 21:34:55 +0200 Subject: [PATCH 33/40] Add PromQL format and label matcher set/delete commands to promtool Signed-off-by: Rob Skillington Signed-off-by: Julien Pivotto --- cmd/promtool/main.go | 103 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 103 insertions(+) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index f94be8b27f..2b5ee9aee1 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -58,6 +58,7 @@ import ( "github.com/prometheus/prometheus/notifier" _ "github.com/prometheus/prometheus/plugins" // Register plugins. "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/util/documentcli" ) @@ -245,6 +246,22 @@ func main() { "A list of one or more files containing recording rules to be backfilled. All recording rules listed in the files will be backfilled. 
Alerting rules are not evaluated.", ).Required().ExistingFiles() + promQLCmd := app.Command("promql", "PromQL formatting and editing.") + + promQLFormatCmd := promQLCmd.Command("format", "Format PromQL query to pretty printed form.") + promQLFormatQuery := promQLFormatCmd.Arg("query", "PromQL query.").Required().String() + + promQLLabelsCmd := promQLCmd.Command("label-matchers", "Edit label matchers contained within an existing PromQL query.") + promQLLabelsSetCmd := promQLLabelsCmd.Command("set", "Set a label matcher in the query.") + promQLLabelsSetType := promQLLabelsSetCmd.Flag("type", "Type of the label matcher to set.").Short('t').Default("=").Enum("=", "!=", "=~", "!~") + promQLLabelsSetQuery := promQLLabelsSetCmd.Arg("query", "PromQL query.").Required().String() + promQLLabelsSetName := promQLLabelsSetCmd.Arg("name", "Name of the label matcher to set.").Required().String() + promQLLabelsSetValue := promQLLabelsSetCmd.Arg("value", "Value of the label matcher to set.").Required().String() + + promQLLabelsDeleteCmd := promQLLabelsCmd.Command("delete", "Delete a label from the query.") + promQLLabelsDeleteQuery := promQLLabelsDeleteCmd.Arg("query", "PromQL query.").Required().String() + promQLLabelsDeleteName := promQLLabelsDeleteCmd.Arg("name", "Name of the label to delete.").Required().String() + featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings() documentationCmd := app.Command("write-documentation", "Generate command line documentation. 
Internal use.").Hidden() @@ -364,8 +381,18 @@ func main() { case importRulesCmd.FullCommand(): os.Exit(checkErr(importRules(serverURL, httpRoundTripper, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...))) + case documentationCmd.FullCommand(): os.Exit(checkErr(documentcli.GenerateMarkdown(app.Model(), os.Stdout))) + + case promQLFormatCmd.FullCommand(): + os.Exit(checkErr(formatPromQL(*promQLFormatQuery))) + + case promQLLabelsSetCmd.FullCommand(): + os.Exit(checkErr(labelsSetPromQL(*promQLLabelsSetQuery, *promQLLabelsSetType, *promQLLabelsSetName, *promQLLabelsSetValue))) + + case promQLLabelsDeleteCmd.FullCommand(): + os.Exit(checkErr(labelsDeletePromQL(*promQLLabelsDeleteQuery, *promQLLabelsDeleteName))) } } @@ -1375,3 +1402,79 @@ func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *c return nil } + +func formatPromQL(query string) error { + expr, err := parser.ParseExpr(query) + if err != nil { + return err + } + + fmt.Println(expr.Pretty(0)) + return nil +} + +func labelsSetPromQL(query, labelMatchType, name, value string) error { + expr, err := parser.ParseExpr(query) + if err != nil { + return err + } + + var matchType labels.MatchType + switch labelMatchType { + case parser.ItemType(parser.EQL).String(): + matchType = labels.MatchEqual + case parser.ItemType(parser.NEQ).String(): + matchType = labels.MatchNotEqual + case parser.ItemType(parser.EQL_REGEX).String(): + matchType = labels.MatchRegexp + case parser.ItemType(parser.NEQ_REGEX).String(): + matchType = labels.MatchNotRegexp + default: + return fmt.Errorf("invalid label match type: %s", labelMatchType) + } + + parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + if n, ok := node.(*parser.VectorSelector); ok { + var found bool + for i, l := range n.LabelMatchers { + if l.Name == name { + n.LabelMatchers[i].Type = matchType + n.LabelMatchers[i].Value = value + found = true + } + } 
+ if !found { + n.LabelMatchers = append(n.LabelMatchers, &labels.Matcher{ + Type: matchType, + Name: name, + Value: value, + }) + } + } + return nil + }) + + fmt.Println(expr.Pretty(0)) + return nil +} + +func labelsDeletePromQL(query, name string) error { + expr, err := parser.ParseExpr(query) + if err != nil { + return err + } + + parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + if n, ok := node.(*parser.VectorSelector); ok { + for i, l := range n.LabelMatchers { + if l.Name == name { + n.LabelMatchers = append(n.LabelMatchers[:i], n.LabelMatchers[i+1:]...) + } + } + } + return nil + }) + + fmt.Println(expr.Pretty(0)) + return nil +} From b3b669fd9a00d980cb853efa1b8a4912ddeba3bf Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Wed, 12 Jul 2023 22:30:20 +0200 Subject: [PATCH 34/40] Add experimental flag and docs Signed-off-by: Julien Pivotto --- cmd/promtool/main.go | 14 ++++++- docs/command-line/promtool.md | 71 +++++++++++++++++++++++++++++++++++ 2 files changed, 84 insertions(+), 1 deletion(-) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 2b5ee9aee1..da4b8dc797 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -92,6 +92,8 @@ func main() { checkCmd := app.Command("check", "Check the resources for validity.") + experimental := app.Flag("experimental", "Enable experimental commands.").Bool() + sdCheckCmd := checkCmd.Command("service-discovery", "Perform service discovery for the given job name and report the results, including relabeling.") sdConfigFile := sdCheckCmd.Arg("config-file", "The prometheus config file.").Required().ExistingFile() sdJobName := sdCheckCmd.Arg("job", "The job to run service discovery for.").Required().String() @@ -246,7 +248,7 @@ func main() { "A list of one or more files containing recording rules to be backfilled. All recording rules listed in the files will be backfilled. 
Alerting rules are not evaluated.", ).Required().ExistingFiles() - promQLCmd := app.Command("promql", "PromQL formatting and editing.") + promQLCmd := app.Command("promql", "PromQL formatting and editing. Requires the --experimental flag.") promQLFormatCmd := promQLCmd.Command("format", "Format PromQL query to pretty printed form.") promQLFormatQuery := promQLFormatCmd.Arg("query", "PromQL query.").Required().String() @@ -386,16 +388,26 @@ func main() { os.Exit(checkErr(documentcli.GenerateMarkdown(app.Model(), os.Stdout))) case promQLFormatCmd.FullCommand(): + checkExperimental(*experimental) os.Exit(checkErr(formatPromQL(*promQLFormatQuery))) case promQLLabelsSetCmd.FullCommand(): + checkExperimental(*experimental) os.Exit(checkErr(labelsSetPromQL(*promQLLabelsSetQuery, *promQLLabelsSetType, *promQLLabelsSetName, *promQLLabelsSetValue))) case promQLLabelsDeleteCmd.FullCommand(): + checkExperimental(*experimental) os.Exit(checkErr(labelsDeletePromQL(*promQLLabelsDeleteQuery, *promQLLabelsDeleteName))) } } +func checkExperimental(f bool) { + if !f { + fmt.Fprintln(os.Stderr, "This command is experimental and requires the --experimental flag to be set.") + os.Exit(1) + } +} + // nolint:revive var lintError = fmt.Errorf("lint error") diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 673e8c0481..587286e105 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -14,6 +14,7 @@ Tooling for the Prometheus monitoring system. | --- | --- | | -h, --help | Show context-sensitive help (also try --help-long and --help-man). | | --version | Show application version. | +| --experimental | Enable experimental commands. | | --enable-feature | Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details. | @@ -30,6 +31,7 @@ Tooling for the Prometheus monitoring system. 
| push | Push to a Prometheus server. | | test | Unit testing. | | tsdb | Run tsdb commands. | +| promql | PromQL formatting and editing. Requires the --experimental flag. | @@ -609,3 +611,72 @@ Create blocks of data for new recording rules. | rule-files | A list of one or more files containing recording rules to be backfilled. All recording rules listed in the files will be backfilled. Alerting rules are not evaluated. | Yes | + + +### `promtool promql` + +PromQL formatting and editing. Requires the --experimental flag. + + + +##### `promtool promql format` + +Format PromQL query to pretty printed form. + + + +###### Arguments + +| Argument | Description | Required | +| --- | --- | --- | +| query | PromQL query. | Yes | + + + + +##### `promtool promql label-matchers` + +Edit label matchers contained within an existing PromQL query. + + + +##### `promtool promql label-matchers set` + +Set a label matcher in the query. + + + +###### Flags + +| Flag | Description | Default | +| --- | --- | --- | +| -t, --type | Type of the label matcher to set. | `=` | + + + + +###### Arguments + +| Argument | Description | Required | +| --- | --- | --- | +| query | PromQL query. | Yes | +| name | Name of the label matcher to set. | Yes | +| value | Value of the label matcher to set. | Yes | + + + + +##### `promtool promql label-matchers delete` + +Delete a label from the query. + + + +###### Arguments + +| Argument | Description | Required | +| --- | --- | --- | +| query | PromQL query. | Yes | +| name | Name of the label to delete. 
| Yes | + + From 096ceca44f79a6f6ba9e31f7e5695d84d1cca330 Mon Sep 17 00:00:00 2001 From: cui fliter Date: Thu, 13 Jul 2023 21:53:40 +0800 Subject: [PATCH 35/40] remove repetitive words (#12556) Signed-off-by: cui fliter --- model/histogram/float_histogram.go | 2 +- promql/value.go | 2 +- tsdb/ooo_head_read.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 2ced090167..782b07df6f 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -421,7 +421,7 @@ func addBucket( // receiving histogram, but a pointer to it is returned for convenience. // // The ideal value for maxEmptyBuckets depends on circumstances. The motivation -// to set maxEmptyBuckets > 0 is the assumption that is is less overhead to +// to set maxEmptyBuckets > 0 is the assumption that is less overhead to // represent very few empty buckets explicitly within one span than cutting the // one span into two to treat the empty buckets as a gap between the two spans, // both in terms of storage requirement as well as in terms of encoding and diff --git a/promql/value.go b/promql/value.go index f59a25112b..1b2a9d221d 100644 --- a/promql/value.go +++ b/promql/value.go @@ -214,7 +214,7 @@ func (s Sample) MarshalJSON() ([]byte, error) { return json.Marshal(h) } -// Vector is basically only an an alias for []Sample, but the contract is that +// Vector is basically only an alias for []Sample, but the contract is that // in a Vector, all Samples have the same timestamp. type Vector []Sample diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index 8030fc367f..2d683b545a 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -137,7 +137,7 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra // chunks Meta the first chunk that overlaps with others. 
// Example chunks of a series: 5:(100, 200) 6:(500, 600) 7:(150, 250) 8:(550, 650) // In the example 5 overlaps with 7 and 6 overlaps with 8 so we only want to - // to return chunk Metas for chunk 5 and chunk 6e + // return chunk Metas for chunk 5 and chunk 6e *chks = append(*chks, tmpChks[0]) maxTime := tmpChks[0].MaxTime // Tracks the maxTime of the previous "to be merged chunk". for _, c := range tmpChks[1:] { From fd5b01afdcb1fefad227b9f7667125e02a1395f1 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Thu, 13 Jul 2023 22:26:49 +0200 Subject: [PATCH 36/40] promtool docs: write flags between backticks in help Signed-off-by: Julien Pivotto --- docs/command-line/promtool.md | 2 +- util/documentcli/documentcli.go | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 587286e105..546b200e27 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -615,7 +615,7 @@ Create blocks of data for new recording rules. ### `promtool promql` -PromQL formatting and editing. Requires the --experimental flag. +PromQL formatting and editing. Requires the `--experimental` flag. 
diff --git a/util/documentcli/documentcli.go b/util/documentcli/documentcli.go index c199d8d9b8..720a7c9c7f 100644 --- a/util/documentcli/documentcli.go +++ b/util/documentcli/documentcli.go @@ -26,6 +26,7 @@ import ( "strings" "github.com/alecthomas/kingpin/v2" + "github.com/grafana/regexp" ) // GenerateMarkdown generates the markdown documentation for an application from @@ -230,6 +231,7 @@ func writeSubcommands(writer io.Writer, level int, modelName string, commands [] if cmd.HelpLong != "" { help = cmd.HelpLong } + help = formatHyphenatedWords(help) if _, err := writer.Write([]byte(fmt.Sprintf("\n\n%s `%s %s`\n\n%s\n\n", strings.Repeat("#", level+1), modelName, cmd.FullCommand, help))); err != nil { return err } @@ -250,3 +252,11 @@ func writeSubcommands(writer io.Writer, level int, modelName string, commands [] } return nil } + +func formatHyphenatedWords(input string) string { + hyphenRegex := regexp.MustCompile(`\B--\w+\b`) + replacer := func(s string) string { + return fmt.Sprintf("`%s`", s) + } + return hyphenRegex.ReplaceAllStringFunc(input, replacer) +} From de89a8c827640923d68ab4677803803ee51a3894 Mon Sep 17 00:00:00 2001 From: Daniel Swarbrick Date: Sun, 16 Jul 2023 19:23:44 +0200 Subject: [PATCH 37/40] Linode SD: cast InstanceSpec values to int64 to avoid overflows InstanceSpec struct members are untyped integers, so they can overflow on 32-bit arch when bit-shifted left. 
Signed-off-by: Daniel Swarbrick --- discovery/linode/linode.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go index 12b9575143..63213c87b2 100644 --- a/discovery/linode/linode.go +++ b/discovery/linode/linode.go @@ -304,10 +304,10 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro linodeLabelGroup: model.LabelValue(instance.Group), linodeLabelHypervisor: model.LabelValue(instance.Hypervisor), linodeLabelBackups: model.LabelValue(backupsStatus), - linodeLabelSpecsDiskBytes: model.LabelValue(fmt.Sprintf("%d", instance.Specs.Disk<<20)), - linodeLabelSpecsMemoryBytes: model.LabelValue(fmt.Sprintf("%d", instance.Specs.Memory<<20)), + linodeLabelSpecsDiskBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Disk)<<20)), + linodeLabelSpecsMemoryBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Memory)<<20)), linodeLabelSpecsVCPUs: model.LabelValue(fmt.Sprintf("%d", instance.Specs.VCPUs)), - linodeLabelSpecsTransferBytes: model.LabelValue(fmt.Sprintf("%d", instance.Specs.Transfer<<20)), + linodeLabelSpecsTransferBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Transfer)<<20)), } addr := net.JoinHostPort(publicIPv4, strconv.FormatUint(uint64(d.port), 10)) From 7aa79657163ca2e085d6cea9c1538267266d2772 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20T=C3=B6lle?= Date: Mon, 17 Jul 2023 14:21:18 +0200 Subject: [PATCH 38/40] build(deps): bump github.com/hetznercloud/hcloud-go to v2.0.0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Version 2 introduced a breaking change in the `id` field of all resources. They were changed from `int` to `int64` to make sure that all future numerical IDs are supported on all architectures. 
You can learn more about this [here](https://docs.hetzner.cloud/#deprecation-notices-%E2%9A%A0%EF%B8%8F) Signed-off-by: Julian Tölle --- discovery/hetzner/hcloud.go | 2 +- discovery/hetzner/hetzner.go | 2 +- go.mod | 12 ++++++------ go.sum | 26 +++++++++++++------------- 4 files changed, 21 insertions(+), 21 deletions(-) diff --git a/discovery/hetzner/hcloud.go b/discovery/hetzner/hcloud.go index 50afdc1ec3..4bcfde8302 100644 --- a/discovery/hetzner/hcloud.go +++ b/discovery/hetzner/hcloud.go @@ -22,7 +22,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/hetznercloud/hcloud-go/hcloud" + "github.com/hetznercloud/hcloud-go/v2/hcloud" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" diff --git a/discovery/hetzner/hetzner.go b/discovery/hetzner/hetzner.go index 084319d959..40b28cc2c9 100644 --- a/discovery/hetzner/hetzner.go +++ b/discovery/hetzner/hetzner.go @@ -20,7 +20,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/hetznercloud/hcloud-go/hcloud" + "github.com/hetznercloud/hcloud-go/v2/hcloud" "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/go.mod b/go.mod index 565456ed96..527cc41546 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/consul/api v1.21.0 github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f - github.com/hetznercloud/hcloud-go v1.47.0 + github.com/hetznercloud/hcloud-go/v2 v2.0.0 github.com/ionos-cloud/sdk-go/v6 v6.1.7 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.12 @@ -64,10 +64,10 @@ require ( go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.2 go.uber.org/goleak v1.2.1 - golang.org/x/net v0.11.0 + golang.org/x/net v0.12.0 golang.org/x/oauth2 v0.9.0 golang.org/x/sync v0.2.0 - golang.org/x/sys v0.9.0 + golang.org/x/sys v0.10.0 golang.org/x/time v0.3.0 golang.org/x/tools v0.9.3 google.golang.org/api 
v0.114.0 @@ -178,11 +178,11 @@ require ( go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect go.opentelemetry.io/otel/metric v1.16.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect - golang.org/x/crypto v0.10.0 // indirect + golang.org/x/crypto v0.11.0 // indirect golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 golang.org/x/mod v0.10.0 // indirect - golang.org/x/term v0.9.0 // indirect - golang.org/x/text v0.10.0 // indirect + golang.org/x/term v0.10.0 // indirect + golang.org/x/text v0.11.0 // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 57f9f82c69..512eca1255 100644 --- a/go.sum +++ b/go.sum @@ -464,8 +464,8 @@ github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f/go.mod h1:Xjd3 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go v1.47.0 h1:WMZDwLPtMZwOLWIgERHrrrTzRFdHx0hTygYVQ4VWHW4= -github.com/hetznercloud/hcloud-go v1.47.0/go.mod h1:zSpmBnxIdb5oMdbpVg1Q977Cq2qiwERkjj3jqRbHH5U= +github.com/hetznercloud/hcloud-go/v2 v2.0.0 h1:Sg1DJ+MAKvbYAqaBaq9tPbwXBS2ckPIaMtVdUjKu+4g= +github.com/hetznercloud/hcloud-go/v2 v2.0.0/go.mod h1:4iUG2NG8b61IAwNx6UsMWQ6IfIf/i1RsG0BbsKAyR5Q= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -700,7 +700,7 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= 
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= @@ -844,8 +844,8 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= -golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -928,8 +928,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod 
h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU= -golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= +golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1021,14 +1021,14 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28= -golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= +golang.org/x/term 
v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1039,8 +1039,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= -golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 5a0dea1d91cdd6321b5e7b6f38de850821d6872e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20T=C3=B6lle?= Date: Mon, 17 Jul 2023 14:30:23 +0200 Subject: [PATCH 39/40] docs: use actual flag for signing off commits MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flag is documented as --signoff: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt---signoff Signed-off-by: Julian Tölle --- .github/PULL_REQUEST_TEMPLATE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md 
b/.github/PULL_REQUEST_TEMPLATE.md index 9e78957ec2..95df72dd0e 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,7 +1,7 @@