From 8ab0a6e2add1a9e40350a32b6e709bdc87eb4080 Mon Sep 17 00:00:00 2001 From: Jerome Meyer Date: Thu, 30 Jan 2020 11:41:57 -0500 Subject: [PATCH 001/632] Added externalURL and pathPrefix functions to Template reference documentation Signed-off-by: Jerome Meyer --- docs/configuration/template_reference.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/configuration/template_reference.md b/docs/configuration/template_reference.md index fe2b60b23..475946923 100644 --- a/docs/configuration/template_reference.md +++ b/docs/configuration/template_reference.md @@ -82,6 +82,8 @@ versions. | args | []interface{} | map[string]interface{} | This converts a list of objects to a map with keys arg0, arg1 etc. This is intended to allow multiple arguments to be passed to templates. | | tmpl | string, []interface{} | nothing | Like the built-in `template`, but allows non-literals as the template name. Note that the result is assumed to be safe, and will not be auto-escaped. Only available in consoles. | | safeHtml | string | string | Marks string as HTML not requiring auto-escaping. | +| externalURL | none | string | The URL under which Prometheus is externally reachable (set with `--web.external-url`). | +| pathPrefix | none | string | Path portion of the external URL. | ## Template type differences From 8f33b8bd2cdbe6a59705773e86ea84bd28325b7c Mon Sep 17 00:00:00 2001 From: shoce Date: Mon, 18 Oct 2021 22:43:31 +0530 Subject: [PATCH 002/632] Add instructions how to persist prometheus docker container data Signed-off-by: shoce --- docs/installation.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/docs/installation.md b/docs/installation.md index 592d67b28..36ff61b36 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -55,6 +55,23 @@ docker run \ prom/prometheus ``` +### Save your Prometheus data + +Prometheus data is stored in `/prometheus` dir inside the container, so the data is cleared every time the container gets restarted. 
To save your data, you need to set up persistent storage (or bind mounts) for your container. + +Run Prometheus container with persistent storage: + +```bash +# Create persistent volume for your data +docker volume create prometheus-data +# Start Prometheus container +docker run \ + -p 9090:9090 \ + -v /path/to/prometheus.yml:/etc/prometheus/prometheus.yml \ + -v prometheus-data:/prometheus \ + prom/prometheus +``` + ### Custom image To avoid managing a file on the host and bind-mount it, the From c7c7847b6f12fa11821ab2927ba4b8d8888d9f07 Mon Sep 17 00:00:00 2001 From: Jinwook Jeong Date: Sat, 9 Apr 2022 01:03:54 +0900 Subject: [PATCH 003/632] Fix discovery managers to be properly cancelled Signed-off-by: Jinwook Jeong --- discovery/legacymanager/manager.go | 8 +++----- discovery/manager.go | 8 +++----- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/discovery/legacymanager/manager.go b/discovery/legacymanager/manager.go index 7a3d6b3b8..2c8ba18a0 100644 --- a/discovery/legacymanager/manager.go +++ b/discovery/legacymanager/manager.go @@ -140,11 +140,9 @@ type Manager struct { // Run starts the background processing func (m *Manager) Run() error { go m.sender() - for range m.ctx.Done() { - m.cancelDiscoverers() - return m.ctx.Err() - } - return nil + <-m.ctx.Done() + m.cancelDiscoverers() + return m.ctx.Err() } // SyncCh returns a read only channel used by all the clients to receive target updates. diff --git a/discovery/manager.go b/discovery/manager.go index 088b9722c..6747c5690 100644 --- a/discovery/manager.go +++ b/discovery/manager.go @@ -166,11 +166,9 @@ type Manager struct { // Run starts the background processing. func (m *Manager) Run() error { go m.sender() - for range m.ctx.Done() { - m.cancelDiscoverers() - return m.ctx.Err() - } - return nil + <-m.ctx.Done() + m.cancelDiscoverers() + return m.ctx.Err() } // SyncCh returns a read only channel used by all the clients to receive target updates. 
From ff3d4e91dcf8c06ee9206853cd1e1e30c4c52bb3 Mon Sep 17 00:00:00 2001 From: Nolwenn Cauchois Date: Fri, 20 May 2022 12:35:16 +0200 Subject: [PATCH 004/632] mixin: Use url filter on Remote Write dashboard Signed-off-by: Nolwenn Cauchois --- .../prometheus-mixin/dashboards.libsonnet | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/documentation/prometheus-mixin/dashboards.libsonnet b/documentation/prometheus-mixin/dashboards.libsonnet index b95f13e0a..8d4ff6115 100644 --- a/documentation/prometheus-mixin/dashboards.libsonnet +++ b/documentation/prometheus-mixin/dashboards.libsonnet @@ -117,7 +117,7 @@ local template = grafana.template; ( prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"} - - ignoring(remote_name, url) group_right(instance) (prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance"} != 0) + ignoring(remote_name, url) group_right(instance) (prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance", url=~"$url"} != 0) ) |||, legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}', @@ -134,7 +134,7 @@ local template = grafana.template; clamp_min( rate(prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"}[5m]) - - ignoring (remote_name, url) group_right(instance) rate(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance"}[5m]) + ignoring (remote_name, url) group_right(instance) rate(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) , 0) |||, legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}', @@ -151,9 +151,9 @@ local template = grafana.template; rate( prometheus_remote_storage_samples_in_total{cluster=~"$cluster", instance=~"$instance"}[5m]) - - ignoring(remote_name, url) 
group_right(instance) (rate(prometheus_remote_storage_succeeded_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m]) or rate(prometheus_remote_storage_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m])) + ignoring(remote_name, url) group_right(instance) (rate(prometheus_remote_storage_succeeded_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])) - - (rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", instance=~"$instance"}[5m])) + (rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])) |||, legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' )); @@ -166,7 +166,7 @@ local template = grafana.template; min_span=6, ) .addTarget(prometheus.target( - 'prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance"}', + 'prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance", url=~"$url"}', legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' )); @@ -177,7 +177,7 @@ local template = grafana.template; span=4, ) .addTarget(prometheus.target( - 'prometheus_remote_storage_shards_max{cluster=~"$cluster", instance=~"$instance"}', + 'prometheus_remote_storage_shards_max{cluster=~"$cluster", instance=~"$instance", url=~"$url"}', legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' )); @@ -188,7 +188,7 @@ local template = grafana.template; span=4, ) .addTarget(prometheus.target( - 'prometheus_remote_storage_shards_min{cluster=~"$cluster", instance=~"$instance"}', + 'prometheus_remote_storage_shards_min{cluster=~"$cluster", instance=~"$instance", url=~"$url"}', 
legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' )); @@ -199,7 +199,7 @@ local template = grafana.template; span=4, ) .addTarget(prometheus.target( - 'prometheus_remote_storage_shards_desired{cluster=~"$cluster", instance=~"$instance"}', + 'prometheus_remote_storage_shards_desired{cluster=~"$cluster", instance=~"$instance", url=~"$url"}', legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' )); @@ -210,7 +210,7 @@ local template = grafana.template; span=6, ) .addTarget(prometheus.target( - 'prometheus_remote_storage_shard_capacity{cluster=~"$cluster", instance=~"$instance"}', + 'prometheus_remote_storage_shard_capacity{cluster=~"$cluster", instance=~"$instance", url=~"$url"}', legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' )); @@ -222,7 +222,7 @@ local template = grafana.template; span=6, ) .addTarget(prometheus.target( - 'prometheus_remote_storage_pending_samples{cluster=~"$cluster", instance=~"$instance"} or prometheus_remote_storage_samples_pending{cluster=~"$cluster", instance=~"$instance"}', + 'prometheus_remote_storage_pending_samples{cluster=~"$cluster", instance=~"$instance", url=~"$url"} or prometheus_remote_storage_samples_pending{cluster=~"$cluster", instance=~"$instance", url=~"$url"}', legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' )); @@ -257,7 +257,7 @@ local template = grafana.template; span=3, ) .addTarget(prometheus.target( - 'rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", instance=~"$instance"}[5m])', + 'rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])', legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' )); @@ -268,7 +268,7 @@ local template = grafana.template; 
span=3, ) .addTarget(prometheus.target( - 'rate(prometheus_remote_storage_failed_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{cluster=~"$cluster", instance=~"$instance"}[5m])', + 'rate(prometheus_remote_storage_failed_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])', legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' )); @@ -279,7 +279,7 @@ local template = grafana.template; span=3, ) .addTarget(prometheus.target( - 'rate(prometheus_remote_storage_retried_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m]) or rate(prometheus_remote_storage_samples_retried_total{cluster=~"$cluster", instance=~"$instance"}[5m])', + 'rate(prometheus_remote_storage_retried_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_retried_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])', legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' )); @@ -290,7 +290,7 @@ local template = grafana.template; span=3, ) .addTarget(prometheus.target( - 'rate(prometheus_remote_storage_enqueue_retries_total{cluster=~"$cluster", instance=~"$instance"}[5m])', + 'rate(prometheus_remote_storage_enqueue_retries_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])', legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' )); From c2af0de522b9d89ed681035262be9d6ca0452381 Mon Sep 17 00:00:00 2001 From: songjiayang Date: Sun, 12 Jun 2022 08:06:14 +0800 Subject: [PATCH 005/632] make sure response error when TOC parse failed Signed-off-by: songjiayang --- tsdb/index/index.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 29295c45f..c81441a6b 100644 --- a/tsdb/index/index.go +++ 
b/tsdb/index/index.go @@ -174,18 +174,15 @@ func NewTOCFromByteSlice(bs ByteSlice) (*TOC, error) { return nil, errors.Wrap(encoding.ErrInvalidChecksum, "read TOC") } - if err := d.Err(); err != nil { - return nil, err - } - - return &TOC{ + toc := &TOC{ Symbols: d.Be64(), Series: d.Be64(), LabelIndices: d.Be64(), LabelIndicesTable: d.Be64(), Postings: d.Be64(), PostingsTable: d.Be64(), - }, nil + } + return toc, d.Err() } // NewWriter returns a new Writer to the given filename. It serializes data in format version 2. From 00ba2f9a46763389cfea922391c0b1c00cc6baba Mon Sep 17 00:00:00 2001 From: Ashish Kurmi Date: Wed, 7 Sep 2022 21:27:16 -0700 Subject: [PATCH 006/632] ci: add minimum GitHub token permissions for workflows Signed-off-by: Ashish Kurmi --- .github/workflows/buf-lint.yml | 3 +++ .github/workflows/buf.yml | 3 +++ .github/workflows/codeql-analysis.yml | 3 +++ .github/workflows/funcbench.yml | 3 +++ .github/workflows/fuzzing.yml | 3 +++ .github/workflows/repo_sync.yml | 3 +++ 6 files changed, 18 insertions(+) diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index 37756adbf..bb5d78e5e 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -4,6 +4,9 @@ on: paths: - ".github/workflows/buf-lint.yml" - "**.proto" +permissions: + contents: read + jobs: buf: name: lint diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index 4fe8c86b3..ee06981e0 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -3,6 +3,9 @@ on: push: branches: - main +permissions: + contents: read + jobs: buf: name: lint and publish diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 01075f0c2..298c0701a 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -6,6 +6,9 @@ on: schedule: - cron: "26 14 * * 1" +permissions: + contents: read + jobs: analyze: name: Analyze diff --git a/.github/workflows/funcbench.yml 
b/.github/workflows/funcbench.yml index 6583aa95b..0826bcabe 100644 --- a/.github/workflows/funcbench.yml +++ b/.github/workflows/funcbench.yml @@ -2,6 +2,9 @@ on: repository_dispatch: types: [funcbench_start] name: Funcbench Workflow +permissions: + contents: read + jobs: run_funcbench: name: Running funcbench diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml index 87c40d310..d0751f2fb 100644 --- a/.github/workflows/fuzzing.yml +++ b/.github/workflows/fuzzing.yml @@ -1,6 +1,9 @@ name: CIFuzz on: workflow_call: +permissions: + contents: read + jobs: Fuzzing: runs-on: ubuntu-latest diff --git a/.github/workflows/repo_sync.yml b/.github/workflows/repo_sync.yml index ca8197878..392d801b0 100644 --- a/.github/workflows/repo_sync.yml +++ b/.github/workflows/repo_sync.yml @@ -2,6 +2,9 @@ on: schedule: - cron: '44 17 * * *' +permissions: + contents: read + jobs: repo_sync: runs-on: ubuntu-latest From 3fb881af261361a6083eedb329dd3aef45a377fe Mon Sep 17 00:00:00 2001 From: Dimitar Dimitrov Date: Sun, 11 Sep 2022 11:21:03 +0200 Subject: [PATCH 007/632] Simplify rule group's EvalTimestamp formula I found it hard to understand how EvalTimestamp works, so I wanted to simplify the math there. This PR should be a noop. 
Current formula is: ``` offset = g.hash % g.interval adjNow = startTime - offset base = adjNow - (adjNow % g.interval) EvalTimestamp = base + offset ``` I simplify `EvalTimestamp` ``` EvalTimestamp = base + offset # expand base = adjNow - (adjNow % g.interval) + offset # expand adjNow = startTime - offset - ((startTime - offset) % g.interval) + offset # cancel out offset = startTime - ((startTime - offset) % g.interval) # expand A+B (mod M) = (A (mod M) + B (mod M)) (mod M) = startTime - (startTime % g.interval - offset % g.interval) % g.interval # expand offset = startTime - (startTime % g.interval - ((g.hash % g.interval) % g.interval)) % g.interval # remove redundant mod g.interval = startTime - (startTime % g.interval - g.hash % g.interval) % g.interval # simplify (A (mod M) + B (mod M)) (mod M) = A+B (mod M) = startTime - (startTime - g.hash) % g.interval offset = (startTime - g.hash) % g.interval EvalTimestamp = startTime - offset ``` Signed-off-by: Dimitar Dimitrov --- rules/manager.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/rules/manager.go b/rules/manager.go index 5eed4bfb0..140fb45b7 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -532,13 +532,9 @@ func (g *Group) setLastEvaluation(ts time.Time) { // EvalTimestamp returns the immediately preceding consistently slotted evaluation time. 
func (g *Group) EvalTimestamp(startTime int64) time.Time { - var ( - offset = int64(g.hash() % uint64(g.interval)) - adjNow = startTime - offset - base = adjNow - (adjNow % int64(g.interval)) - ) + offset := (uint64(startTime) - g.hash()) % uint64(g.interval) - return time.Unix(0, base+offset).UTC() + return time.Unix(0, startTime-int64(offset)).UTC() } func nameAndLabels(rule Rule) string { From ecfaa48a17d4f0f8f35d3b7e19d4b6f969f02c47 Mon Sep 17 00:00:00 2001 From: Ashish Kurmi <100655670+boahc077@users.noreply.github.com> Date: Mon, 19 Sep 2022 01:17:46 -0700 Subject: [PATCH 008/632] Update .github/workflows/codeql-analysis.yml Co-authored-by: Christian Hoffmann Signed-off-by: Ashish Kurmi <100655670+boahc077@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 298c0701a..f0d3c060e 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -8,6 +8,7 @@ on: permissions: contents: read + security-events: write jobs: analyze: From ea40c15fe9d69273281bda41ab3e300087156ec8 Mon Sep 17 00:00:00 2001 From: Guillaume Berche Date: Mon, 3 Oct 2022 18:06:27 +0200 Subject: [PATCH 009/632] Refine documentation for label_replace Illustrate use of named capturing group syntax available from https://github.com/google/re2/wiki/Syntax and their usage in the replacement field Signed-off-by: Guillaume Berche --- docs/querying/functions.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index 5f0774136..79502a19c 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -292,13 +292,13 @@ For each timeseries in `v`, `label_replace(v instant-vector, dst_label string, r matches the regular expression `regex` against the value of the label `src_label`. 
If it matches, the value of the label `dst_label` in the returned timeseries will be the expansion of `replacement`, together with the original labels in the input. Capturing groups in the -regular expression can be referenced with `$1`, `$2`, etc. If the regular expression doesn't -match then the timeseries is returned unchanged. +regular expression can be referenced with `$1`, `$2`, etc. Named capturing groups in the regular expression can be referenced with `$name` (where `name` is the capturing group name). If the regular expression doesn't match then the timeseries is returned unchanged. -This example will return timeseries with the values `a:c` at label `service` and `a` at label `foo`: +These two examples will both return timeseries with the values `a:c` at label `service` and `a` at label `foo`: ``` label_replace(up{job="api-server",service="a:c"}, "foo", "$1", "service", "(.*):.*") +label_replace(up{job="api-server",service="a:c"}, "foo", "$name", "service", "(?P.*):(?P.*)") ``` ## `ln()` From 28a66e183d4e758e165ac84deb2cc86a5282fb54 Mon Sep 17 00:00:00 2001 From: Douglas Camata <159076+douglascamata@users.noreply.github.com> Date: Fri, 7 Oct 2022 13:15:33 +0200 Subject: [PATCH 010/632] Update relabel.Process comment Signed-off-by: Douglas Camata <159076+douglascamata@users.noreply.github.com> --- model/relabel/relabel.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index e0d7f6ddf..b4bb999f7 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -188,10 +188,12 @@ func (re Regexp) String() string { return str[4 : len(str)-2] } -// Process returns a relabeled copy of the given label set. The relabel configurations +// Process returns a relabeled version of the given label set. The relabel configurations // are applied in order of input. +// There are circumstances where Process will modify the input label. 
+// If you want to avoid issues with the input label set being modified, at the cost of +// higher memory usage, you can use lbls.Copy(). // If a label set is dropped, nil is returned. -// May return the input labelSet modified. func Process(lbls labels.Labels, cfgs ...*Config) labels.Labels { lb := labels.NewBuilder(nil) for _, cfg := range cfgs { From 03ab8dcca0040674de5c1a6a9caabdf5ab4c1d4b Mon Sep 17 00:00:00 2001 From: Dimitar Dimitrov Date: Wed, 12 Oct 2022 14:12:03 +0200 Subject: [PATCH 011/632] Add comments on EvalTimestamp Signed-off-by: Dimitar Dimitrov --- rules/manager.go | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/rules/manager.go b/rules/manager.go index 140fb45b7..e24eee2c0 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -532,9 +532,28 @@ func (g *Group) setLastEvaluation(ts time.Time) { // EvalTimestamp returns the immediately preceding consistently slotted evaluation time. func (g *Group) EvalTimestamp(startTime int64) time.Time { - offset := (uint64(startTime) - g.hash()) % uint64(g.interval) + var ( + offset = int64(g.hash() % uint64(g.interval)) - return time.Unix(0, startTime-int64(offset)).UTC() + // This group's evaluation times differ from the perfect time intervals by `offset` nanoseconds. + // But we can only use `% interval` to align with the interval. And `% interval` will always + // align with the perfect time intervals, instead of this group's. Because of this we add + // `offset` _after_ aligning with the perfect time interval. + // + // There can be cases where adding `offset` to the perfect evaluation time can yield a + // timestamp in the future, which is not what EvalTimestamp should do. + // So we subtract one `offset` to make sure that `now - (now % interval) + offset` gives an + // evaluation time in the past. + adjNow = startTime - offset + + // Adjust to perfect evaluation intervals. 
+ base = adjNow - (adjNow % int64(g.interval)) + + // Add one offset to randomize the evaluation times of this group. + next = base + offset + ) + + return time.Unix(0, next).UTC() } func nameAndLabels(rule Rule) string { From e6b84ac1e09b1b7774686c6679babf34cf075f84 Mon Sep 17 00:00:00 2001 From: Guillaume Berche Date: Thu, 13 Oct 2022 10:40:52 +0200 Subject: [PATCH 012/632] functions.md doc refinement Removed named capture group example in label_replace Signed-off-by: Guillaume Berche --- docs/querying/functions.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index 79502a19c..3658c10c6 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -294,11 +294,10 @@ matches, the value of the label `dst_label` in the returned timeseries will be t of `replacement`, together with the original labels in the input. Capturing groups in the regular expression can be referenced with `$1`, `$2`, etc. Named capturing groups in the regular expression can be referenced with `$name` (where `name` is the capturing group name). If the regular expression doesn't match then the timeseries is returned unchanged. 
-These two examples will both return timeseries with the values `a:c` at label `service` and `a` at label `foo`: +This example will return timeseries with the values `a:c` at label `service` and `a` at label `foo`: ``` label_replace(up{job="api-server",service="a:c"}, "foo", "$1", "service", "(.*):.*") -label_replace(up{job="api-server",service="a:c"}, "foo", "$name", "service", "(?P.*):(?P.*)") ``` ## `ln()` From 52270d621668c3428531f68370c40065f13bbd90 Mon Sep 17 00:00:00 2001 From: Chance Feick Date: Mon, 14 Nov 2022 13:30:22 -0800 Subject: [PATCH 013/632] Fix relative link to use .md file extension Signed-off-by: Chance Feick --- docs/querying/basics.md | 2 +- docs/querying/functions.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/querying/basics.md b/docs/querying/basics.md index bc4478f62..5e9ddc119 100644 --- a/docs/querying/basics.md +++ b/docs/querying/basics.md @@ -35,7 +35,7 @@ vector is the only type that can be directly graphed. _Notes about the experimental native histograms:_ * Ingesting native histograms has to be enabled via a [feature - flag](../feature_flags/#native-histograms). + flag](../feature_flags.md#native-histograms). * Once native histograms have been ingested into the TSDB (and even after disabling the feature flag again), both instant vectors and range vectors may now contain samples that aren't simple floating point numbers (float samples) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index b4bc0a743..82513e1c0 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -14,7 +14,7 @@ vector, which if not provided it will default to the value of the expression _Notes about the experimental native histograms:_ * Ingesting native histograms has to be enabled via a [feature - flag](../feature_flags/#native-histograms). As long as no native histograms + flag](../feature_flags.md#native-histograms). 
As long as no native histograms have been ingested into the TSDB, all functions will behave as usual. * Functions that do not explicitly mention native histograms in their documentation (see below) effectively treat a native histogram as a float From 2cfd8da6280f97176ee877aaa0c5f724d20905de Mon Sep 17 00:00:00 2001 From: Chance Feick <6326742+chancefeick@users.noreply.github.com> Date: Tue, 15 Nov 2022 08:10:44 -0800 Subject: [PATCH 014/632] Update docs/querying/basics.md Co-authored-by: Julien Pivotto Signed-off-by: Chance Feick <6326742+chancefeick@users.noreply.github.com> --- docs/querying/basics.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/querying/basics.md b/docs/querying/basics.md index 5e9ddc119..c3aa7b804 100644 --- a/docs/querying/basics.md +++ b/docs/querying/basics.md @@ -35,7 +35,7 @@ vector is the only type that can be directly graphed. _Notes about the experimental native histograms:_ * Ingesting native histograms has to be enabled via a [feature - flag](../feature_flags.md#native-histograms). + flag](../../feature_flags/#native-histograms). 
* Once native histograms have been ingested into the TSDB (and even after disabling the feature flag again), both instant vectors and range vectors may now contain samples that aren't simple floating point numbers (float samples) From d542483e8c03bb58332c277ad7fd8e9d8eb547cb Mon Sep 17 00:00:00 2001 From: Aaron George Date: Wed, 30 Nov 2022 00:08:01 +0000 Subject: [PATCH 015/632] k8s discovery: Ensure that the pod IP is in the status before adding to target group Signed-off-by: Aaron George Signed-off-by: Aaron George --- discovery/kubernetes/endpoints.go | 23 +++++++----- discovery/kubernetes/endpoints_test.go | 43 ++++++++++++++++++++++ discovery/kubernetes/endpointslice.go | 23 +++++++----- discovery/kubernetes/endpointslice_test.go | 43 ++++++++++++++++++++++ 4 files changed, 112 insertions(+), 20 deletions(-) diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index 1f39c23e7..8c26af3c4 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -375,18 +375,21 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { continue } - a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) - ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) + // PodIP can be empty when a pod is starting or has been evicted. 
+ if len(pe.pod.Status.PodIP) != 0 { + a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) + ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) - target := model.LabelSet{ - model.AddressLabel: lv(a), - podContainerNameLabel: lv(c.Name), - podContainerImageLabel: lv(c.Image), - podContainerPortNameLabel: lv(cport.Name), - podContainerPortNumberLabel: lv(ports), - podContainerPortProtocolLabel: lv(string(cport.Protocol)), + target := model.LabelSet{ + model.AddressLabel: lv(a), + podContainerNameLabel: lv(c.Name), + podContainerImageLabel: lv(c.Image), + podContainerPortNameLabel: lv(cport.Name), + podContainerPortNumberLabel: lv(ports), + podContainerPortProtocolLabel: lv(string(cport.Protocol)), + } + tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } - tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } } } diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go index 91b1b0c67..21190b905 100644 --- a/discovery/kubernetes/endpoints_test.go +++ b/discovery/kubernetes/endpoints_test.go @@ -825,3 +825,46 @@ func TestEndpointsDiscoveryOwnNamespace(t *testing.T) { }, }.Run(t) } + +func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) { + ep := makeEndpoints() + ep.Namespace = "ns" + + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "ns", + UID: types.UID("deadbeef"), + }, + Spec: v1.PodSpec{ + NodeName: "testnode", + Containers: []v1.Container{ + { + Name: "p1", + Image: "p1:latest", + Ports: []v1.ContainerPort{ + { + Name: "mainport", + ContainerPort: 9000, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + }, + Status: v1.PodStatus{}, + } + + objs := []runtime.Object{ + ep, + pod, + } + + n, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{IncludeOwnNamespace: true}, objs...) 
+ + k8sDiscoveryTest{ + discovery: n, + expectedMaxItems: 0, + expectedRes: map[string]*targetgroup.Group{}, + }.Run(t) +} diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index 594759f45..c6e739496 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -393,18 +393,21 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou continue } - a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) - ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) + // PodIP can be empty when a pod is starting or has been evicted. + if len(pe.pod.Status.PodIP) != 0 { + a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) + ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) - target := model.LabelSet{ - model.AddressLabel: lv(a), - podContainerNameLabel: lv(c.Name), - podContainerImageLabel: lv(c.Image), - podContainerPortNameLabel: lv(cport.Name), - podContainerPortNumberLabel: lv(ports), - podContainerPortProtocolLabel: lv(string(cport.Protocol)), + target := model.LabelSet{ + model.AddressLabel: lv(a), + podContainerNameLabel: lv(c.Name), + podContainerImageLabel: lv(c.Image), + podContainerPortNameLabel: lv(cport.Name), + podContainerPortNumberLabel: lv(ports), + podContainerPortProtocolLabel: lv(string(cport.Protocol)), + } + tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } - tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } } } diff --git a/discovery/kubernetes/endpointslice_test.go b/discovery/kubernetes/endpointslice_test.go index a0ae543fc..a1fcffaba 100644 --- a/discovery/kubernetes/endpointslice_test.go +++ b/discovery/kubernetes/endpointslice_test.go @@ -1074,3 +1074,46 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) { }, }.Run(t) } + +func TestEndpointSliceDiscoveryEmptyPodStatus(t *testing.T) { + ep := 
makeEndpointSliceV1() + ep.Namespace = "ns" + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "ns", + UID: types.UID("deadbeef"), + }, + Spec: corev1.PodSpec{ + NodeName: "testnode", + Containers: []corev1.Container{ + { + Name: "p1", + Image: "p1:latest", + Ports: []corev1.ContainerPort{ + { + Name: "mainport", + ContainerPort: 9000, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + Status: corev1.PodStatus{}, + } + + objs := []runtime.Object{ + ep, + pod, + } + + n, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{IncludeOwnNamespace: true}, objs...) + + k8sDiscoveryTest{ + discovery: n, + expectedMaxItems: 0, + expectedRes: map[string]*targetgroup.Group{}, + }.Run(t) +} From b6caa6cabf65ccffcf1532d5bb02e7af17371e6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Krupa=20=28paulfantom=29?= Date: Wed, 30 Nov 2022 14:02:54 +0100 Subject: [PATCH 016/632] documentation/mixin: use prometheus metrics for dashboard variables MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Paweł Krupa (paulfantom) --- documentation/prometheus-mixin/dashboards.libsonnet | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation/prometheus-mixin/dashboards.libsonnet b/documentation/prometheus-mixin/dashboards.libsonnet index b95f13e0a..b6e295a96 100644 --- a/documentation/prometheus-mixin/dashboards.libsonnet +++ b/documentation/prometheus-mixin/dashboards.libsonnet @@ -314,7 +314,7 @@ local template = grafana.template; template.new( 'cluster', '$datasource', - 'label_values(kube_pod_container_info{image=~".*prometheus.*"}, cluster)' % $._config, + 'label_values(prometheus_build_info, cluster)' % $._config, refresh='time', current={ selected: true, From 3e94dd8c8f83a11c4ac30e5a748b96adbc85cb40 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 25 Jan 2023 14:30:47 +1100 Subject: [PATCH 017/632] Add extension point for returning different content types 
from API endpoints Signed-off-by: Charles Korn --- web/api/v1/api.go | 64 ++++++-- web/api/v1/api_test.go | 290 ++++++++++++---------------------- web/api/v1/codec.go | 26 +++ web/api/v1/json_codec.go | 32 ++++ web/api/v1/json_codec_test.go | 178 +++++++++++++++++++++ 5 files changed, 391 insertions(+), 199 deletions(-) create mode 100644 web/api/v1/codec.go create mode 100644 web/api/v1/json_codec.go create mode 100644 web/api/v1/json_codec_test.go diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 894a8666a..525bc446a 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -80,6 +80,8 @@ const ( var LocalhostRepresentations = []string{"127.0.0.1", "localhost", "::1"} +var defaultCodec = JSONCodec{} + type apiError struct { typ errorType err error @@ -145,7 +147,8 @@ type RuntimeInfo struct { StorageRetention string `json:"storageRetention"` } -type response struct { +// Response contains a response to a HTTP API request. +type Response struct { Status status `json:"status"` Data interface{} `json:"data,omitempty"` ErrorType errorType `json:"errorType,omitempty"` @@ -208,6 +211,8 @@ type API struct { remoteWriteHandler http.Handler remoteReadHandler http.Handler + + codecs map[string]Codec } func init() { @@ -273,8 +278,12 @@ func NewAPI( statsRenderer: defaultStatsRenderer, remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame), + + codecs: map[string]Codec{}, } + a.InstallCodec(defaultCodec) + if statsRenderer != nil { a.statsRenderer = statsRenderer } @@ -286,6 +295,16 @@ func NewAPI( return a } +// InstallCodec adds codec to this API's available codecs. +// If codec handles a content type handled by a codec already installed in this API, codec replaces the previous codec. 
+func (api *API) InstallCodec(codec Codec) { + if api.codecs == nil { + api.codecs = map[string]Codec{} + } + + api.codecs[codec.ContentType()] = codec +} + func setUnavailStatusOnTSDBNotReady(r apiFuncResult) apiFuncResult { if r.err != nil && errors.Cause(r.err.err) == tsdb.ErrNotReady { r.err.typ = errorUnavailable @@ -308,7 +327,7 @@ func (api *API) Register(r *route.Router) { } if result.data != nil { - api.respond(w, result.data, result.warnings) + api.respond(w, r, result.data, result.warnings) return } w.WriteHeader(http.StatusNoContent) @@ -1446,7 +1465,7 @@ func (api *API) serveWALReplayStatus(w http.ResponseWriter, r *http.Request) { if err != nil { api.respondError(w, &apiError{errorInternal, err}, nil) } - api.respond(w, walReplayStatus{ + api.respond(w, r, walReplayStatus{ Min: status.Min, Max: status.Max, Current: status.Current, @@ -1548,34 +1567,59 @@ func (api *API) cleanTombstones(r *http.Request) apiFuncResult { return apiFuncResult{nil, nil, nil, nil} } -func (api *API) respond(w http.ResponseWriter, data interface{}, warnings storage.Warnings) { +func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface{}, warnings storage.Warnings) { statusMessage := statusSuccess var warningStrings []string for _, warning := range warnings { warningStrings = append(warningStrings, warning.Error()) } - json := jsoniter.ConfigCompatibleWithStandardLibrary - b, err := json.Marshal(&response{ + + resp := &Response{ Status: statusMessage, Data: data, Warnings: warningStrings, - }) + } + + codec := api.negotiateCodec(req, resp) + b, err := codec.Encode(resp) if err != nil { - level.Error(api.logger).Log("msg", "error marshaling json response", "err", err) + level.Error(api.logger).Log("msg", "error marshaling response", "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", codec.ContentType()) w.WriteHeader(http.StatusOK) if n, err 
:= w.Write(b); err != nil { level.Error(api.logger).Log("msg", "error writing response", "bytesWritten", n, "err", err) } } +// HTTP content negotiation is hard (see https://developer.mozilla.org/en-US/docs/Web/HTTP/Content_negotiation). +// Ideally, we shouldn't be implementing this ourselves - https://github.com/golang/go/issues/19307 is an open proposal to add +// this to the Go stdlib and has links to a number of other implementations. +// +// This is an MVP, and doesn't support features like wildcards or weighting. +func (api *API) negotiateCodec(req *http.Request, resp *Response) Codec { + acceptHeader := req.Header.Get("Accept") + if acceptHeader == "" { + return defaultCodec + } + + for _, contentType := range strings.Split(acceptHeader, ",") { + codec, ok := api.codecs[strings.TrimSpace(contentType)] + if ok && codec.CanEncode(resp) { + return codec + } + } + + level.Warn(api.logger).Log("msg", "could not find suitable codec for response, falling back to default codec", "accept_header", acceptHeader) + return defaultCodec +} + func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data interface{}) { json := jsoniter.ConfigCompatibleWithStandardLibrary - b, err := json.Marshal(&response{ + b, err := json.Marshal(&Response{ Status: statusError, ErrorType: apiErr.typ, Error: apiErr.err.Error(), diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 7e2dcbd8b..617a8bdf3 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -18,7 +18,6 @@ import ( "encoding/json" "fmt" "io" - "math" "net/http" "net/http/httptest" "net/url" @@ -30,7 +29,6 @@ import ( "testing" "time" - "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/util/stats" @@ -2765,39 +2763,93 @@ func TestAdminEndpoints(t *testing.T) { } func TestRespondSuccess(t *testing.T) { + api := API{ + logger: log.NewNopLogger(), + } + + api.InstallCodec(&testCodec{contentType: 
"test/cannot-encode", canEncode: false}) + api.InstallCodec(&testCodec{contentType: "test/can-encode", canEncode: true}) + api.InstallCodec(&testCodec{contentType: "test/can-encode-2", canEncode: true}) + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - api := API{} - api.respond(w, "test", nil) + api.respond(w, r, "test", nil) })) defer s.Close() - resp, err := http.Get(s.URL) - if err != nil { - t.Fatalf("Error on test request: %s", err) - } - body, err := io.ReadAll(resp.Body) - defer resp.Body.Close() - if err != nil { - t.Fatalf("Error reading response body: %s", err) - } + for _, tc := range []struct { + name string + acceptHeader string + expectedContentType string + expectedBody string + }{ + { + name: "no Accept header", + expectedContentType: "application/json", + expectedBody: `{"status":"success","data":"test"}`, + }, + { + name: "Accept header with single content type which is suitable", + acceptHeader: "test/can-encode", + expectedContentType: "test/can-encode", + expectedBody: `response from test/can-encode codec`, + }, + { + name: "Accept header with single content type which is not available", + acceptHeader: "test/not-registered", + expectedContentType: "application/json", + expectedBody: `{"status":"success","data":"test"}`, + }, + { + name: "Accept header with single content type which cannot encode the response payload", + acceptHeader: "test/cannot-encode", + expectedContentType: "application/json", + expectedBody: `{"status":"success","data":"test"}`, + }, + { + name: "Accept header with multiple content types, all of which are suitable", + acceptHeader: "test/can-encode, test/can-encode-2", + expectedContentType: "test/can-encode", + expectedBody: `response from test/can-encode codec`, + }, + { + name: "Accept header with multiple content types, only one of which is available", + acceptHeader: "test/not-registered, test/can-encode", + expectedContentType: "test/can-encode", + expectedBody: `response 
from test/can-encode codec`, + }, + { + name: "Accept header with multiple content types, only one of which can encode the response payload", + acceptHeader: "test/cannot-encode, test/can-encode", + expectedContentType: "test/can-encode", + expectedBody: `response from test/can-encode codec`, + }, + { + name: "Accept header with multiple content types, none of which are available", + acceptHeader: "test/not-registered, test/also-not-registered", + expectedContentType: "application/json", + expectedBody: `{"status":"success","data":"test"}`, + }, + } { + t.Run(tc.name, func(t *testing.T) { + req, err := http.NewRequest(http.MethodGet, s.URL, nil) + require.NoError(t, err) - if resp.StatusCode != 200 { - t.Fatalf("Return code %d expected in success response but got %d", 200, resp.StatusCode) - } - if h := resp.Header.Get("Content-Type"); h != "application/json" { - t.Fatalf("Expected Content-Type %q but got %q", "application/json", h) - } + if tc.acceptHeader != "" { + req.Header.Set("Accept", tc.acceptHeader) + } - var res response - if err = json.Unmarshal([]byte(body), &res); err != nil { - t.Fatalf("Error unmarshaling JSON body: %s", err) - } + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) - exp := &response{ - Status: statusSuccess, - Data: "test", + body, err := io.ReadAll(resp.Body) + defer resp.Body.Close() + require.NoError(t, err) + + require.Equal(t, http.StatusOK, resp.StatusCode) + require.Equal(t, tc.expectedContentType, resp.Header.Get("Content-Type")) + require.Equal(t, tc.expectedBody, string(body)) + }) } - require.Equal(t, exp, &res) } func TestRespondError(t *testing.T) { @@ -2824,12 +2876,12 @@ func TestRespondError(t *testing.T) { t.Fatalf("Expected Content-Type %q but got %q", "application/json", h) } - var res response + var res Response if err = json.Unmarshal([]byte(body), &res); err != nil { t.Fatalf("Error unmarshaling JSON body: %s", err) } - exp := &response{ + exp := &Response{ Status: statusError, Data: "test", 
ErrorType: errorTimeout, @@ -3047,165 +3099,6 @@ func TestOptionsMethod(t *testing.T) { } } -func TestRespond(t *testing.T) { - cases := []struct { - response interface{} - expected string - }{ - { - response: &queryData{ - ResultType: parser.ValueTypeMatrix, - Result: promql.Matrix{ - promql.Series{ - Points: []promql.Point{{V: 1, T: 1000}}, - Metric: labels.FromStrings("__name__", "foo"), - }, - }, - }, - expected: `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"foo"},"values":[[1,"1"]]}]}}`, - }, - { - response: &queryData{ - ResultType: parser.ValueTypeMatrix, - Result: promql.Matrix{ - promql.Series{ - Points: []promql.Point{{H: &histogram.FloatHistogram{ - Schema: 2, - ZeroThreshold: 0.001, - ZeroCount: 12, - Count: 10, - Sum: 20, - PositiveSpans: []histogram.Span{ - {Offset: 3, Length: 2}, - {Offset: 1, Length: 3}, - }, - NegativeSpans: []histogram.Span{ - {Offset: 2, Length: 2}, - }, - PositiveBuckets: []float64{1, 2, 2, 1, 1}, - NegativeBuckets: []float64{2, 1}, - }, T: 1000}}, - Metric: labels.FromStrings("__name__", "foo"), - }, - }, - }, - expected: `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"foo"},"histograms":[[1,{"count":"10","sum":"20","buckets":[[1,"-1.6817928305074288","-1.414213562373095","1"],[1,"-1.414213562373095","-1.189207115002721","2"],[3,"-0.001","0.001","12"],[0,"1.414213562373095","1.6817928305074288","1"],[0,"1.6817928305074288","2","2"],[0,"2.378414230005442","2.82842712474619","2"],[0,"2.82842712474619","3.3635856610148576","1"],[0,"3.3635856610148576","4","1"]]}]]}]}}`, - }, - { - response: promql.Point{V: 0, T: 0}, - expected: `{"status":"success","data":[0,"0"]}`, - }, - { - response: promql.Point{V: 20, T: 1}, - expected: `{"status":"success","data":[0.001,"20"]}`, - }, - { - response: promql.Point{V: 20, T: 10}, - expected: `{"status":"success","data":[0.010,"20"]}`, - }, - { - response: promql.Point{V: 20, T: 100}, - expected: 
`{"status":"success","data":[0.100,"20"]}`, - }, - { - response: promql.Point{V: 20, T: 1001}, - expected: `{"status":"success","data":[1.001,"20"]}`, - }, - { - response: promql.Point{V: 20, T: 1010}, - expected: `{"status":"success","data":[1.010,"20"]}`, - }, - { - response: promql.Point{V: 20, T: 1100}, - expected: `{"status":"success","data":[1.100,"20"]}`, - }, - { - response: promql.Point{V: 20, T: 12345678123456555}, - expected: `{"status":"success","data":[12345678123456.555,"20"]}`, - }, - { - response: promql.Point{V: 20, T: -1}, - expected: `{"status":"success","data":[-0.001,"20"]}`, - }, - { - response: promql.Point{V: math.NaN(), T: 0}, - expected: `{"status":"success","data":[0,"NaN"]}`, - }, - { - response: promql.Point{V: math.Inf(1), T: 0}, - expected: `{"status":"success","data":[0,"+Inf"]}`, - }, - { - response: promql.Point{V: math.Inf(-1), T: 0}, - expected: `{"status":"success","data":[0,"-Inf"]}`, - }, - { - response: promql.Point{V: 1.2345678e6, T: 0}, - expected: `{"status":"success","data":[0,"1234567.8"]}`, - }, - { - response: promql.Point{V: 1.2345678e-6, T: 0}, - expected: `{"status":"success","data":[0,"0.0000012345678"]}`, - }, - { - response: promql.Point{V: 1.2345678e-67, T: 0}, - expected: `{"status":"success","data":[0,"1.2345678e-67"]}`, - }, - { - response: []exemplar.QueryResult{ - { - SeriesLabels: labels.FromStrings("foo", "bar"), - Exemplars: []exemplar.Exemplar{ - { - Labels: labels.FromStrings("traceID", "abc"), - Value: 100.123, - Ts: 1234, - }, - }, - }, - }, - expected: `{"status":"success","data":[{"seriesLabels":{"foo":"bar"},"exemplars":[{"labels":{"traceID":"abc"},"value":"100.123","timestamp":1.234}]}]}`, - }, - { - response: []exemplar.QueryResult{ - { - SeriesLabels: labels.FromStrings("foo", "bar"), - Exemplars: []exemplar.Exemplar{ - { - Labels: labels.FromStrings("traceID", "abc"), - Value: math.Inf(1), - Ts: 1234, - }, - }, - }, - }, - expected: 
`{"status":"success","data":[{"seriesLabels":{"foo":"bar"},"exemplars":[{"labels":{"traceID":"abc"},"value":"+Inf","timestamp":1.234}]}]}`, - }, - } - - for _, c := range cases { - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - api := API{} - api.respond(w, c.response, nil) - })) - defer s.Close() - - resp, err := http.Get(s.URL) - if err != nil { - t.Fatalf("Error on test request: %s", err) - } - body, err := io.ReadAll(resp.Body) - defer resp.Body.Close() - if err != nil { - t.Fatalf("Error reading response body: %s", err) - } - - if string(body) != c.expected { - t.Fatalf("Expected response \n%v\n but got \n%v\n", c.expected, string(body)) - } - } -} - func TestTSDBStatus(t *testing.T) { tsdb := &fakeDB{} tsdbStatusAPI := func(api *API) apiFunc { return api.serveTSDBStatus } @@ -3281,6 +3174,8 @@ var testResponseWriter = httptest.ResponseRecorder{} func BenchmarkRespond(b *testing.B) { b.ReportAllocs() + request, err := http.NewRequest(http.MethodGet, "/does-not-matter", nil) + require.NoError(b, err) points := []promql.Point{} for i := 0; i < 10000; i++ { points = append(points, promql.Point{V: float64(i * 1000000), T: int64(i)}) @@ -3297,7 +3192,7 @@ func BenchmarkRespond(b *testing.B) { b.ResetTimer() api := API{} for n := 0; n < b.N; n++ { - api.respond(&testResponseWriter, response, nil) + api.respond(&testResponseWriter, request, response, nil) } } @@ -3408,3 +3303,20 @@ func TestGetGlobalURL(t *testing.T) { }) } } + +type testCodec struct { + contentType string + canEncode bool +} + +func (t *testCodec) ContentType() string { + return t.contentType +} + +func (t *testCodec) CanEncode(_ *Response) bool { + return t.canEncode +} + +func (t *testCodec) Encode(_ *Response) ([]byte, error) { + return []byte(fmt.Sprintf("response from %v codec", t.contentType)), nil +} diff --git a/web/api/v1/codec.go b/web/api/v1/codec.go new file mode 100644 index 000000000..d11bb1fa0 --- /dev/null +++ b/web/api/v1/codec.go @@ -0,0 
+1,26 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+// A Codec performs encoding of API responses.
+type Codec interface {
+	// ContentType returns the MIME type that this Codec emits.
+	ContentType() string
+
+	// CanEncode determines if this Codec can encode resp.
+	CanEncode(resp *Response) bool
+
+	// Encode encodes resp, ready for transmission to an API consumer.
+	Encode(resp *Response) ([]byte, error)
+}
diff --git a/web/api/v1/json_codec.go b/web/api/v1/json_codec.go
new file mode 100644
index 000000000..b38dab038
--- /dev/null
+++ b/web/api/v1/json_codec.go
@@ -0,0 +1,32 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import jsoniter "github.com/json-iterator/go"
+
+// JSONCodec is a Codec that encodes API responses as JSON. 
+type JSONCodec struct{} + +func (j JSONCodec) ContentType() string { + return "application/json" +} + +func (j JSONCodec) CanEncode(_ *Response) bool { + return true +} + +func (j JSONCodec) Encode(resp *Response) ([]byte, error) { + json := jsoniter.ConfigCompatibleWithStandardLibrary + return json.Marshal(resp) +} diff --git a/web/api/v1/json_codec_test.go b/web/api/v1/json_codec_test.go new file mode 100644 index 000000000..c5b030ff9 --- /dev/null +++ b/web/api/v1/json_codec_test.go @@ -0,0 +1,178 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1 + +import ( + "math" + "testing" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/parser" +) + +func TestJsonCodec_Encode(t *testing.T) { + cases := []struct { + response interface{} + expected string + }{ + { + response: &queryData{ + ResultType: parser.ValueTypeMatrix, + Result: promql.Matrix{ + promql.Series{ + Points: []promql.Point{{V: 1, T: 1000}}, + Metric: labels.FromStrings("__name__", "foo"), + }, + }, + }, + expected: `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"foo"},"values":[[1,"1"]]}]}}`, + }, + { + response: &queryData{ + ResultType: parser.ValueTypeMatrix, + Result: promql.Matrix{ + promql.Series{ + Points: []promql.Point{{H: &histogram.FloatHistogram{ + Schema: 2, + ZeroThreshold: 0.001, + ZeroCount: 12, + Count: 10, + Sum: 20, + PositiveSpans: []histogram.Span{ + {Offset: 3, Length: 2}, + {Offset: 1, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: 2, Length: 2}, + }, + PositiveBuckets: []float64{1, 2, 2, 1, 1}, + NegativeBuckets: []float64{2, 1}, + }, T: 1000}}, + Metric: labels.FromStrings("__name__", "foo"), + }, + }, + }, + expected: `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"foo"},"histograms":[[1,{"count":"10","sum":"20","buckets":[[1,"-1.6817928305074288","-1.414213562373095","1"],[1,"-1.414213562373095","-1.189207115002721","2"],[3,"-0.001","0.001","12"],[0,"1.414213562373095","1.6817928305074288","1"],[0,"1.6817928305074288","2","2"],[0,"2.378414230005442","2.82842712474619","2"],[0,"2.82842712474619","3.3635856610148576","1"],[0,"3.3635856610148576","4","1"]]}]]}]}}`, + }, + { + response: promql.Point{V: 0, T: 0}, + expected: `{"status":"success","data":[0,"0"]}`, + }, + { + response: promql.Point{V: 20, T: 1}, + expected: 
`{"status":"success","data":[0.001,"20"]}`, + }, + { + response: promql.Point{V: 20, T: 10}, + expected: `{"status":"success","data":[0.010,"20"]}`, + }, + { + response: promql.Point{V: 20, T: 100}, + expected: `{"status":"success","data":[0.100,"20"]}`, + }, + { + response: promql.Point{V: 20, T: 1001}, + expected: `{"status":"success","data":[1.001,"20"]}`, + }, + { + response: promql.Point{V: 20, T: 1010}, + expected: `{"status":"success","data":[1.010,"20"]}`, + }, + { + response: promql.Point{V: 20, T: 1100}, + expected: `{"status":"success","data":[1.100,"20"]}`, + }, + { + response: promql.Point{V: 20, T: 12345678123456555}, + expected: `{"status":"success","data":[12345678123456.555,"20"]}`, + }, + { + response: promql.Point{V: 20, T: -1}, + expected: `{"status":"success","data":[-0.001,"20"]}`, + }, + { + response: promql.Point{V: math.NaN(), T: 0}, + expected: `{"status":"success","data":[0,"NaN"]}`, + }, + { + response: promql.Point{V: math.Inf(1), T: 0}, + expected: `{"status":"success","data":[0,"+Inf"]}`, + }, + { + response: promql.Point{V: math.Inf(-1), T: 0}, + expected: `{"status":"success","data":[0,"-Inf"]}`, + }, + { + response: promql.Point{V: 1.2345678e6, T: 0}, + expected: `{"status":"success","data":[0,"1234567.8"]}`, + }, + { + response: promql.Point{V: 1.2345678e-6, T: 0}, + expected: `{"status":"success","data":[0,"0.0000012345678"]}`, + }, + { + response: promql.Point{V: 1.2345678e-67, T: 0}, + expected: `{"status":"success","data":[0,"1.2345678e-67"]}`, + }, + { + response: []exemplar.QueryResult{ + { + SeriesLabels: labels.FromStrings("foo", "bar"), + Exemplars: []exemplar.Exemplar{ + { + Labels: labels.FromStrings("traceID", "abc"), + Value: 100.123, + Ts: 1234, + }, + }, + }, + }, + expected: `{"status":"success","data":[{"seriesLabels":{"foo":"bar"},"exemplars":[{"labels":{"traceID":"abc"},"value":"100.123","timestamp":1.234}]}]}`, + }, + { + response: []exemplar.QueryResult{ + { + SeriesLabels: labels.FromStrings("foo", "bar"), + 
Exemplars: []exemplar.Exemplar{ + { + Labels: labels.FromStrings("traceID", "abc"), + Value: math.Inf(1), + Ts: 1234, + }, + }, + }, + }, + expected: `{"status":"success","data":[{"seriesLabels":{"foo":"bar"},"exemplars":[{"labels":{"traceID":"abc"},"value":"+Inf","timestamp":1.234}]}]}`, + }, + } + + codec := JSONCodec{} + + for _, c := range cases { + body, err := codec.Encode(&Response{ + Status: statusSuccess, + Data: c.response, + }) + if err != nil { + t.Fatalf("Error encoding response body: %s", err) + } + + if string(body) != c.expected { + t.Fatalf("Expected response \n%v\n but got \n%v\n", c.expected, string(body)) + } + } +} From a0dd1468be609a0b4933ec094777f36e8d743e6c Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Tue, 31 Jan 2023 14:53:20 +1100 Subject: [PATCH 018/632] Move custom jsoniter code into json_codec.go. Signed-off-by: Charles Korn --- web/api/v1/api.go | 255 ------------------------------------- web/api/v1/json_codec.go | 262 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 261 insertions(+), 256 deletions(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 525bc446a..e0ad76c5c 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -27,7 +27,6 @@ import ( "strconv" "strings" "time" - "unsafe" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -40,8 +39,6 @@ import ( "golang.org/x/exp/slices" "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/model/exemplar" - "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/model/timestamp" @@ -54,7 +51,6 @@ import ( "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/index" "github.com/prometheus/prometheus/util/httputil" - "github.com/prometheus/prometheus/util/jsonutil" "github.com/prometheus/prometheus/util/stats" ) @@ -215,13 +211,6 @@ type API struct { codecs map[string]Codec } -func 
init() { - jsoniter.RegisterTypeEncoderFunc("promql.Series", marshalSeriesJSON, marshalSeriesJSONIsEmpty) - jsoniter.RegisterTypeEncoderFunc("promql.Sample", marshalSampleJSON, marshalSampleJSONIsEmpty) - jsoniter.RegisterTypeEncoderFunc("promql.Point", marshalPointJSON, marshalPointJSONIsEmpty) - jsoniter.RegisterTypeEncoderFunc("exemplar.Exemplar", marshalExemplarJSON, marshalExemplarJSONEmpty) -} - // NewAPI returns an initialized API type. func NewAPI( qe QueryEngine, @@ -1724,247 +1713,3 @@ OUTER: } return matcherSets, nil } - -// marshalSeriesJSON writes something like the following: -// -// { -// "metric" : { -// "__name__" : "up", -// "job" : "prometheus", -// "instance" : "localhost:9090" -// }, -// "values": [ -// [ 1435781451.781, "1" ], -// < more values> -// ], -// "histograms": [ -// [ 1435781451.781, { < histogram, see below > } ], -// < more histograms > -// ], -// }, -func marshalSeriesJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { - s := *((*promql.Series)(ptr)) - stream.WriteObjectStart() - stream.WriteObjectField(`metric`) - m, err := s.Metric.MarshalJSON() - if err != nil { - stream.Error = err - return - } - stream.SetBuffer(append(stream.Buffer(), m...)) - - // We make two passes through the series here: In the first marshaling - // all value points, in the second marshaling all histogram - // points. That's probably cheaper than just one pass in which we copy - // out histogram Points into a newly allocated slice for separate - // marshaling. (Could be benchmarked, though.) 
- var foundValue, foundHistogram bool - for _, p := range s.Points { - if p.H == nil { - stream.WriteMore() - if !foundValue { - stream.WriteObjectField(`values`) - stream.WriteArrayStart() - } - foundValue = true - marshalPointJSON(unsafe.Pointer(&p), stream) - } else { - foundHistogram = true - } - } - if foundValue { - stream.WriteArrayEnd() - } - if foundHistogram { - firstHistogram := true - for _, p := range s.Points { - if p.H != nil { - stream.WriteMore() - if firstHistogram { - stream.WriteObjectField(`histograms`) - stream.WriteArrayStart() - } - firstHistogram = false - marshalPointJSON(unsafe.Pointer(&p), stream) - } - } - stream.WriteArrayEnd() - } - stream.WriteObjectEnd() -} - -func marshalSeriesJSONIsEmpty(ptr unsafe.Pointer) bool { - return false -} - -// marshalSampleJSON writes something like the following for normal value samples: -// -// { -// "metric" : { -// "__name__" : "up", -// "job" : "prometheus", -// "instance" : "localhost:9090" -// }, -// "value": [ 1435781451.781, "1" ] -// }, -// -// For histogram samples, it writes something like this: -// -// { -// "metric" : { -// "__name__" : "up", -// "job" : "prometheus", -// "instance" : "localhost:9090" -// }, -// "histogram": [ 1435781451.781, { < histogram, see below > } ] -// }, -func marshalSampleJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { - s := *((*promql.Sample)(ptr)) - stream.WriteObjectStart() - stream.WriteObjectField(`metric`) - m, err := s.Metric.MarshalJSON() - if err != nil { - stream.Error = err - return - } - stream.SetBuffer(append(stream.Buffer(), m...)) - stream.WriteMore() - if s.Point.H == nil { - stream.WriteObjectField(`value`) - } else { - stream.WriteObjectField(`histogram`) - } - marshalPointJSON(unsafe.Pointer(&s.Point), stream) - stream.WriteObjectEnd() -} - -func marshalSampleJSONIsEmpty(ptr unsafe.Pointer) bool { - return false -} - -// marshalPointJSON writes `[ts, "val"]`. 
-func marshalPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { - p := *((*promql.Point)(ptr)) - stream.WriteArrayStart() - jsonutil.MarshalTimestamp(p.T, stream) - stream.WriteMore() - if p.H == nil { - jsonutil.MarshalValue(p.V, stream) - } else { - marshalHistogram(p.H, stream) - } - stream.WriteArrayEnd() -} - -func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool { - return false -} - -// marshalHistogramJSON writes something like: -// -// { -// "count": "42", -// "sum": "34593.34", -// "buckets": [ -// [ 3, "-0.25", "0.25", "3"], -// [ 0, "0.25", "0.5", "12"], -// [ 0, "0.5", "1", "21"], -// [ 0, "2", "4", "6"] -// ] -// } -// -// The 1st element in each bucket array determines if the boundaries are -// inclusive (AKA closed) or exclusive (AKA open): -// -// 0: lower exclusive, upper inclusive -// 1: lower inclusive, upper exclusive -// 2: both exclusive -// 3: both inclusive -// -// The 2nd and 3rd elements are the lower and upper boundary. The 4th element is -// the bucket count. -func marshalHistogram(h *histogram.FloatHistogram, stream *jsoniter.Stream) { - stream.WriteObjectStart() - stream.WriteObjectField(`count`) - jsonutil.MarshalValue(h.Count, stream) - stream.WriteMore() - stream.WriteObjectField(`sum`) - jsonutil.MarshalValue(h.Sum, stream) - - bucketFound := false - it := h.AllBucketIterator() - for it.Next() { - bucket := it.At() - if bucket.Count == 0 { - continue // No need to expose empty buckets in JSON. - } - stream.WriteMore() - if !bucketFound { - stream.WriteObjectField(`buckets`) - stream.WriteArrayStart() - } - bucketFound = true - boundaries := 2 // Exclusive on both sides AKA open interval. - if bucket.LowerInclusive { - if bucket.UpperInclusive { - boundaries = 3 // Inclusive on both sides AKA closed interval. - } else { - boundaries = 1 // Inclusive only on lower end AKA right open. - } - } else { - if bucket.UpperInclusive { - boundaries = 0 // Inclusive only on upper end AKA left open. 
- } - } - stream.WriteArrayStart() - stream.WriteInt(boundaries) - stream.WriteMore() - jsonutil.MarshalValue(bucket.Lower, stream) - stream.WriteMore() - jsonutil.MarshalValue(bucket.Upper, stream) - stream.WriteMore() - jsonutil.MarshalValue(bucket.Count, stream) - stream.WriteArrayEnd() - } - if bucketFound { - stream.WriteArrayEnd() - } - stream.WriteObjectEnd() -} - -// marshalExemplarJSON writes. -// -// { -// labels: , -// value: "", -// timestamp: -// } -func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { - p := *((*exemplar.Exemplar)(ptr)) - stream.WriteObjectStart() - - // "labels" key. - stream.WriteObjectField(`labels`) - lbls, err := p.Labels.MarshalJSON() - if err != nil { - stream.Error = err - return - } - stream.SetBuffer(append(stream.Buffer(), lbls...)) - - // "value" key. - stream.WriteMore() - stream.WriteObjectField(`value`) - jsonutil.MarshalValue(p.Value, stream) - - // "timestamp" key. - stream.WriteMore() - stream.WriteObjectField(`timestamp`) - jsonutil.MarshalTimestamp(p.Ts, stream) - - stream.WriteObjectEnd() -} - -func marshalExemplarJSONEmpty(ptr unsafe.Pointer) bool { - return false -} diff --git a/web/api/v1/json_codec.go b/web/api/v1/json_codec.go index b38dab038..455af717d 100644 --- a/web/api/v1/json_codec.go +++ b/web/api/v1/json_codec.go @@ -13,7 +13,23 @@ package v1 -import jsoniter "github.com/json-iterator/go" +import ( + "unsafe" + + jsoniter "github.com/json-iterator/go" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/util/jsonutil" +) + +func init() { + jsoniter.RegisterTypeEncoderFunc("promql.Series", marshalSeriesJSON, marshalSeriesJSONIsEmpty) + jsoniter.RegisterTypeEncoderFunc("promql.Sample", marshalSampleJSON, marshalSampleJSONIsEmpty) + jsoniter.RegisterTypeEncoderFunc("promql.Point", marshalPointJSON, marshalPointJSONIsEmpty) + 
jsoniter.RegisterTypeEncoderFunc("exemplar.Exemplar", marshalExemplarJSON, marshalExemplarJSONEmpty) +} // JSONCodec is a Codec that encodes API responses as JSON. type JSONCodec struct{} @@ -30,3 +46,247 @@ func (j JSONCodec) Encode(resp *Response) ([]byte, error) { json := jsoniter.ConfigCompatibleWithStandardLibrary return json.Marshal(resp) } + +// marshalSeriesJSON writes something like the following: +// +// { +// "metric" : { +// "__name__" : "up", +// "job" : "prometheus", +// "instance" : "localhost:9090" +// }, +// "values": [ +// [ 1435781451.781, "1" ], +// < more values> +// ], +// "histograms": [ +// [ 1435781451.781, { < histogram, see below > } ], +// < more histograms > +// ], +// }, +func marshalSeriesJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { + s := *((*promql.Series)(ptr)) + stream.WriteObjectStart() + stream.WriteObjectField(`metric`) + m, err := s.Metric.MarshalJSON() + if err != nil { + stream.Error = err + return + } + stream.SetBuffer(append(stream.Buffer(), m...)) + + // We make two passes through the series here: In the first marshaling + // all value points, in the second marshaling all histogram + // points. That's probably cheaper than just one pass in which we copy + // out histogram Points into a newly allocated slice for separate + // marshaling. (Could be benchmarked, though.) 
+ var foundValue, foundHistogram bool + for _, p := range s.Points { + if p.H == nil { + stream.WriteMore() + if !foundValue { + stream.WriteObjectField(`values`) + stream.WriteArrayStart() + } + foundValue = true + marshalPointJSON(unsafe.Pointer(&p), stream) + } else { + foundHistogram = true + } + } + if foundValue { + stream.WriteArrayEnd() + } + if foundHistogram { + firstHistogram := true + for _, p := range s.Points { + if p.H != nil { + stream.WriteMore() + if firstHistogram { + stream.WriteObjectField(`histograms`) + stream.WriteArrayStart() + } + firstHistogram = false + marshalPointJSON(unsafe.Pointer(&p), stream) + } + } + stream.WriteArrayEnd() + } + stream.WriteObjectEnd() +} + +func marshalSeriesJSONIsEmpty(ptr unsafe.Pointer) bool { + return false +} + +// marshalSampleJSON writes something like the following for normal value samples: +// +// { +// "metric" : { +// "__name__" : "up", +// "job" : "prometheus", +// "instance" : "localhost:9090" +// }, +// "value": [ 1435781451.781, "1" ] +// }, +// +// For histogram samples, it writes something like this: +// +// { +// "metric" : { +// "__name__" : "up", +// "job" : "prometheus", +// "instance" : "localhost:9090" +// }, +// "histogram": [ 1435781451.781, { < histogram, see below > } ] +// }, +func marshalSampleJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { + s := *((*promql.Sample)(ptr)) + stream.WriteObjectStart() + stream.WriteObjectField(`metric`) + m, err := s.Metric.MarshalJSON() + if err != nil { + stream.Error = err + return + } + stream.SetBuffer(append(stream.Buffer(), m...)) + stream.WriteMore() + if s.Point.H == nil { + stream.WriteObjectField(`value`) + } else { + stream.WriteObjectField(`histogram`) + } + marshalPointJSON(unsafe.Pointer(&s.Point), stream) + stream.WriteObjectEnd() +} + +func marshalSampleJSONIsEmpty(ptr unsafe.Pointer) bool { + return false +} + +// marshalPointJSON writes `[ts, "val"]`. 
+func marshalPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { + p := *((*promql.Point)(ptr)) + stream.WriteArrayStart() + jsonutil.MarshalTimestamp(p.T, stream) + stream.WriteMore() + if p.H == nil { + jsonutil.MarshalValue(p.V, stream) + } else { + marshalHistogram(p.H, stream) + } + stream.WriteArrayEnd() +} + +func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool { + return false +} + +// marshalHistogramJSON writes something like: +// +// { +// "count": "42", +// "sum": "34593.34", +// "buckets": [ +// [ 3, "-0.25", "0.25", "3"], +// [ 0, "0.25", "0.5", "12"], +// [ 0, "0.5", "1", "21"], +// [ 0, "2", "4", "6"] +// ] +// } +// +// The 1st element in each bucket array determines if the boundaries are +// inclusive (AKA closed) or exclusive (AKA open): +// +// 0: lower exclusive, upper inclusive +// 1: lower inclusive, upper exclusive +// 2: both exclusive +// 3: both inclusive +// +// The 2nd and 3rd elements are the lower and upper boundary. The 4th element is +// the bucket count. +func marshalHistogram(h *histogram.FloatHistogram, stream *jsoniter.Stream) { + stream.WriteObjectStart() + stream.WriteObjectField(`count`) + jsonutil.MarshalValue(h.Count, stream) + stream.WriteMore() + stream.WriteObjectField(`sum`) + jsonutil.MarshalValue(h.Sum, stream) + + bucketFound := false + it := h.AllBucketIterator() + for it.Next() { + bucket := it.At() + if bucket.Count == 0 { + continue // No need to expose empty buckets in JSON. + } + stream.WriteMore() + if !bucketFound { + stream.WriteObjectField(`buckets`) + stream.WriteArrayStart() + } + bucketFound = true + boundaries := 2 // Exclusive on both sides AKA open interval. + if bucket.LowerInclusive { + if bucket.UpperInclusive { + boundaries = 3 // Inclusive on both sides AKA closed interval. + } else { + boundaries = 1 // Inclusive only on lower end AKA right open. + } + } else { + if bucket.UpperInclusive { + boundaries = 0 // Inclusive only on upper end AKA left open. 
+ } + } + stream.WriteArrayStart() + stream.WriteInt(boundaries) + stream.WriteMore() + jsonutil.MarshalValue(bucket.Lower, stream) + stream.WriteMore() + jsonutil.MarshalValue(bucket.Upper, stream) + stream.WriteMore() + jsonutil.MarshalValue(bucket.Count, stream) + stream.WriteArrayEnd() + } + if bucketFound { + stream.WriteArrayEnd() + } + stream.WriteObjectEnd() +} + +// marshalExemplarJSON writes. +// +// { +// labels: , +// value: "", +// timestamp: +// } +func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { + p := *((*exemplar.Exemplar)(ptr)) + stream.WriteObjectStart() + + // "labels" key. + stream.WriteObjectField(`labels`) + lbls, err := p.Labels.MarshalJSON() + if err != nil { + stream.Error = err + return + } + stream.SetBuffer(append(stream.Buffer(), lbls...)) + + // "value" key. + stream.WriteMore() + stream.WriteObjectField(`value`) + jsonutil.MarshalValue(p.Value, stream) + + // "timestamp" key. + stream.WriteMore() + stream.WriteObjectField(`timestamp`) + jsonutil.MarshalTimestamp(p.Ts, stream) + + stream.WriteObjectEnd() +} + +func marshalExemplarJSONEmpty(ptr unsafe.Pointer) bool { + return false +} From 857b23873f54afa642e4961071c326ea1e00eddf Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Thu, 2 Feb 2023 15:29:13 +1100 Subject: [PATCH 019/632] Expose QueryData so that implementations of Codec.CanEncode() can perform a type assertion against Response.Data. 
Signed-off-by: Charles Korn --- web/api/v1/api.go | 6 +++--- web/api/v1/api_test.go | 28 ++++++++++++++-------------- web/api/v1/json_codec_test.go | 4 ++-- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index e0ad76c5c..e9c1182ca 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -384,7 +384,7 @@ func (api *API) Register(r *route.Router) { r.Put("/admin/tsdb/snapshot", wrapAgent(api.snapshot)) } -type queryData struct { +type QueryData struct { ResultType parser.ValueType `json:"resultType"` Result parser.Value `json:"result"` Stats stats.QueryStats `json:"stats,omitempty"` @@ -446,7 +446,7 @@ func (api *API) query(r *http.Request) (result apiFuncResult) { } qs := sr(ctx, qry.Stats(), r.FormValue("stats")) - return apiFuncResult{&queryData{ + return apiFuncResult{&QueryData{ ResultType: res.Value.Type(), Result: res.Value, Stats: qs, @@ -537,7 +537,7 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) { } qs := sr(ctx, qry.Stats(), r.FormValue("stats")) - return apiFuncResult{&queryData{ + return apiFuncResult{&QueryData{ ResultType: res.Value.Type(), Result: res.Value, Stats: qs, diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 617a8bdf3..455eae13f 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -831,8 +831,8 @@ func TestStats(t *testing.T) { name: "stats is blank", param: "", expected: func(t *testing.T, i interface{}) { - require.IsType(t, i, &queryData{}) - qd := i.(*queryData) + require.IsType(t, i, &QueryData{}) + qd := i.(*QueryData) require.Nil(t, qd.Stats) }, }, @@ -840,8 +840,8 @@ func TestStats(t *testing.T) { name: "stats is true", param: "true", expected: func(t *testing.T, i interface{}) { - require.IsType(t, i, &queryData{}) - qd := i.(*queryData) + require.IsType(t, i, &QueryData{}) + qd := i.(*QueryData) require.NotNil(t, qd.Stats) qs := qd.Stats.Builtin() require.NotNil(t, qs.Timings) @@ -855,8 +855,8 @@ func TestStats(t 
*testing.T) { name: "stats is all", param: "all", expected: func(t *testing.T, i interface{}) { - require.IsType(t, i, &queryData{}) - qd := i.(*queryData) + require.IsType(t, i, &QueryData{}) + qd := i.(*QueryData) require.NotNil(t, qd.Stats) qs := qd.Stats.Builtin() require.NotNil(t, qs.Timings) @@ -876,8 +876,8 @@ func TestStats(t *testing.T) { }, param: "known", expected: func(t *testing.T, i interface{}) { - require.IsType(t, i, &queryData{}) - qd := i.(*queryData) + require.IsType(t, i, &QueryData{}) + qd := i.(*QueryData) require.NotNil(t, qd.Stats) j, err := json.Marshal(qd.Stats) require.NoError(t, err) @@ -1037,7 +1037,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "query": []string{"2"}, "time": []string{"123.4"}, }, - response: &queryData{ + response: &QueryData{ ResultType: parser.ValueTypeScalar, Result: promql.Scalar{ V: 2, @@ -1051,7 +1051,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "query": []string{"0.333"}, "time": []string{"1970-01-01T00:02:03Z"}, }, - response: &queryData{ + response: &QueryData{ ResultType: parser.ValueTypeScalar, Result: promql.Scalar{ V: 0.333, @@ -1065,7 +1065,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "query": []string{"0.333"}, "time": []string{"1970-01-01T01:02:03+01:00"}, }, - response: &queryData{ + response: &QueryData{ ResultType: parser.ValueTypeScalar, Result: promql.Scalar{ V: 0.333, @@ -1078,7 +1078,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E query: url.Values{ "query": []string{"0.333"}, }, - response: &queryData{ + response: &QueryData{ ResultType: parser.ValueTypeScalar, Result: promql.Scalar{ V: 0.333, @@ -1094,7 +1094,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "end": []string{"2"}, "step": []string{"1"}, }, - response: &queryData{ + response: &QueryData{ ResultType: parser.ValueTypeMatrix, Result: 
promql.Matrix{ promql.Series{ @@ -3180,7 +3180,7 @@ func BenchmarkRespond(b *testing.B) { for i := 0; i < 10000; i++ { points = append(points, promql.Point{V: float64(i * 1000000), T: int64(i)}) } - response := &queryData{ + response := &QueryData{ ResultType: parser.ValueTypeMatrix, Result: promql.Matrix{ promql.Series{ diff --git a/web/api/v1/json_codec_test.go b/web/api/v1/json_codec_test.go index c5b030ff9..20b8ac3a0 100644 --- a/web/api/v1/json_codec_test.go +++ b/web/api/v1/json_codec_test.go @@ -30,7 +30,7 @@ func TestJsonCodec_Encode(t *testing.T) { expected string }{ { - response: &queryData{ + response: &QueryData{ ResultType: parser.ValueTypeMatrix, Result: promql.Matrix{ promql.Series{ @@ -42,7 +42,7 @@ func TestJsonCodec_Encode(t *testing.T) { expected: `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"foo"},"values":[[1,"1"]]}]}}`, }, { - response: &queryData{ + response: &QueryData{ ResultType: parser.ValueTypeMatrix, Result: promql.Matrix{ promql.Series{ From deba5120ead5af493e8a4dac4eb0171596b983e3 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Sat, 11 Feb 2023 15:34:25 +0100 Subject: [PATCH 020/632] Address PR feeedback: reduce log level. 
Signed-off-by: Charles Korn --- web/api/v1/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index e9c1182ca..6c912e9e8 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1602,7 +1602,7 @@ func (api *API) negotiateCodec(req *http.Request, resp *Response) Codec { } } - level.Warn(api.logger).Log("msg", "could not find suitable codec for response, falling back to default codec", "accept_header", acceptHeader) + level.Debug(api.logger).Log("msg", "could not find suitable codec for response, falling back to default codec", "accept_header", acceptHeader) return defaultCodec } From 0032ce06449781d0fc0bb5a4731e87e59aa3efae Mon Sep 17 00:00:00 2001 From: Patryk Prus Date: Sun, 26 Feb 2023 21:05:27 -0500 Subject: [PATCH 021/632] Render background in images to play nicely with dark mode Signed-off-by: Patryk Prus --- README.md | 2 +- documentation/images/architecture.svg | 4 +++- documentation/images/internal_architecture.svg | 4 +++- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 9becf71aa..8b89bb01e 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ The features that distinguish Prometheus from other metrics and monitoring syste ## Architecture overview -![Architecture overview](https://cdn.jsdelivr.net/gh/prometheus/prometheus@c34257d069c630685da35bcef084632ffd5d6209/documentation/images/architecture.svg) +![Architecture overview](documentation/images/architecture.svg) ## Install diff --git a/documentation/images/architecture.svg b/documentation/images/architecture.svg index df93e13cb..4e1e85995 100644 --- a/documentation/images/architecture.svg +++ b/documentation/images/architecture.svg @@ -1,2 +1,4 @@ + + -
pull metrics


[Not supported by viewer]
HDD / SSD
[Not supported by viewer]
Pushgateway
Pushgateway
Short-lived jobs
Short-lived jobs
Jobs / Exporters
Jobs / Exporters
Storage
Storage
Retrieval
Retrieval
PromQL
PromQL
Prometheus Server
[Not supported by viewer]
Node
<b>Node</b>
Service Discovery

Service Discovery<div><br></div>
             find 
                 targets
[Not supported by viewer]
Prometheus Server
Prometheus Server
Alertmanager
Alertmanager
push alerts         


[Not supported by viewer]
Web UI
Web UI
Grafana
Grafana
API clients
API clients
PagerDuty
PagerDuty
Email
Email
  • DNS
  • Kubernetes
  • Consul
  • ...
  • Custom integration
[Not supported by viewer]
              notify
[Not supported by viewer]
...
...
\ No newline at end of file +
pull metrics


pull metrics...
HDD / SSD
HDD / SSD
Pushgateway
Pushgateway
Short-lived jobs
Short-lived jobs
Jobs / Exporters
Jobs / Exporters
Storage
Storage
Retrieval
Retrieval
PromQL
PromQL
Prometheus Server
Prometheus Server
Node
Node
Service Discovery

Service Discovery
             find 
                 targets
find...
Prometheus Server
Prometheus Server
Alertmanager
Alertmanager
push alerts         


push alerts...
Web UI
Web UI
Grafana
Grafana
API clients
API clients
PagerDuty
PagerDuty
Email
Email
  • DNS
  • Kubernetes
  • Consul
  • ...
  • Custom integration
DNSKubernetesConsul...Custom in...
              notify
              notify
...
...
Text is not SVG - cannot display
\ No newline at end of file diff --git a/documentation/images/internal_architecture.svg b/documentation/images/internal_architecture.svg index 5948186a7..1242548dd 100644 --- a/documentation/images/internal_architecture.svg +++ b/documentation/images/internal_architecture.svg @@ -1,2 +1,4 @@ + + -
Fanout Storage
Fanout Storage
read/write
series data 
[Not supported by viewer]
Local
Storage
Local<br>Storage
disk
disk
write series data
write series data
Remote Storage
Remote Storage
send
alerts
[Not supported by viewer]
append rule results
append rule results
Rule Manager
Rule Manager
Notifier
Notifier
discover Alertmanager targets
discover Alertmanager targets
Notifier Discovery
Notifier Discovery
update
Alertmanager
targets
[Not supported by viewer]
Alertmanagers
Alertmanagers
send
alerts
[Not supported by viewer]
read/write
series data
[Not supported by viewer]
scrape
metrics
[Not supported by viewer]
Scrape Manager
Scrape Manager
append 
samples 
[Not supported by viewer]
update scrape targets
[Not supported by viewer]
discover scrape targets
discover scrape targets
Scrape Discovery
Scrape Discovery
query
query
PromQL
[Not supported by viewer]
read series data
read series data
Remote Read Endpoints
Remote Read Endpoints<br>
Remote Write Endpoints
Remote Write Endpoints
Targets
Targets
Service Discovery
Service Discovery
Web API/UI
Web API/UI
query
query
PromQL
[Not supported by viewer]
Reload Handler
Reload Handler
Termination
Handler
Termination<br>Handler<br>
(most other
components)
[Not supported by viewer]
reload
reload
terminate
terminate<br>
view/control
view/control
view/control
view/control
Web Clients
Web Clients
Prometheus Server
<b>Prometheus Server</b>
\ No newline at end of file +
Fanout Storage
Fanout Storage
read/write
series data 
read/write...
Local
Storage
Local...
disk
disk
write series data
write series data
Remote Storage
Remote Storage
send
alerts
send...
append rule results
append rule results
Rule Manager
Rule Manager
Notifier
Notifier
discover Alertmanager targets
discover Alertmanager targets
Notifier Discovery
Notifier Disco...
update
Alertmanager
targets
update...
Alertmanagers
Alertmanagers
send
alerts
send...
read/write
series data
read/write...
scrape
metrics
scrape...
Scrape Manager
Scrape Manager
append 
samples 
append...
update scrape targets
update scrape targets
discover scrape targets
discover scrape targets
Scrape Discovery
Scrape Discove...
query
query
PromQL
PromQL
read series data
read series data
Remote Read Endpoints
Remote Read En...
Remote Write Endpoints
Remote Write E...
Targets
Targets
Service Discovery
Service Discov...
Web API/UI
Web API/UI
query
query
PromQL
PromQL
Reload Handler
Reload Handler
Termination
Handler
Termination...
(most other
components)
(most other...
reload
reload
terminate
termin...
view/control
view/c...
view/control
view/control
Web Clients
Web Clients
Prometheus Server
Prometheus Server
Text is not SVG - cannot display
\ No newline at end of file From 46a28899a0e86b4b073bb3172a2ac4013ba87807 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Mon, 27 Feb 2023 13:27:09 +1100 Subject: [PATCH 022/632] Implement fully-featured content negotiation for API requests, and allow overriding the default API codec. Signed-off-by: Charles Korn --- go.mod | 2 +- web/api/v1/api.go | 78 ++++++++++++++++++++-------------------- web/api/v1/api_test.go | 41 ++++++++++++++++++--- web/api/v1/codec.go | 29 ++++++++++++++- web/api/v1/codec_test.go | 68 +++++++++++++++++++++++++++++++++++ web/api/v1/json_codec.go | 4 +-- 6 files changed, 175 insertions(+), 47 deletions(-) create mode 100644 web/api/v1/codec_test.go diff --git a/go.mod b/go.mod index ac94408e4..a12b3505d 100644 --- a/go.mod +++ b/go.mod @@ -160,7 +160,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v1.0.0 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 10cf6885d..cfac908fe 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -32,6 +32,7 @@ import ( "github.com/go-kit/log/level" "github.com/grafana/regexp" jsoniter "github.com/json-iterator/go" + "github.com/munnerz/goautoneg" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -68,20 +69,19 @@ const ( type errorType string const ( - errorNone errorType = "" - errorTimeout errorType = "timeout" - errorCanceled errorType = "canceled" - errorExec errorType = "execution" - errorBadData errorType = "bad_data" - errorInternal errorType = "internal" - errorUnavailable errorType = "unavailable" - 
errorNotFound errorType = "not_found" + errorNone errorType = "" + errorTimeout errorType = "timeout" + errorCanceled errorType = "canceled" + errorExec errorType = "execution" + errorBadData errorType = "bad_data" + errorInternal errorType = "internal" + errorUnavailable errorType = "unavailable" + errorNotFound errorType = "not_found" + errorNotAcceptable errorType = "not_acceptable" ) var LocalhostRepresentations = []string{"127.0.0.1", "localhost", "::1"} -var defaultCodec = JSONCodec{} - type apiError struct { typ errorType err error @@ -212,7 +212,7 @@ type API struct { remoteWriteHandler http.Handler remoteReadHandler http.Handler - codecs map[string]Codec + codecs []Codec } // NewAPI returns an initialized API type. @@ -271,11 +271,9 @@ func NewAPI( statsRenderer: defaultStatsRenderer, remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame), - - codecs: map[string]Codec{}, } - a.InstallCodec(defaultCodec) + a.InstallCodec(JSONCodec{}) if statsRenderer != nil { a.statsRenderer = statsRenderer @@ -289,13 +287,15 @@ func NewAPI( } // InstallCodec adds codec to this API's available codecs. -// If codec handles a content type handled by a codec already installed in this API, codec replaces the previous codec. +// Codecs installed first take precedence over codecs installed later when evaluating wildcards in Accept headers. +// The first installed codec is used as a fallback when the Accept header cannot be satisfied or if there is no Accept header. func (api *API) InstallCodec(codec Codec) { - if api.codecs == nil { - api.codecs = map[string]Codec{} - } + api.codecs = append(api.codecs, codec) +} - api.codecs[codec.ContentType()] = codec +// ClearCodecs removes all available codecs from this API, including the default codec installed by NewAPI. 
+func (api *API) ClearCodecs() { + api.codecs = nil } func setUnavailStatusOnTSDBNotReady(r apiFuncResult) apiFuncResult { @@ -1583,7 +1583,12 @@ func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface Warnings: warningStrings, } - codec := api.negotiateCodec(req, resp) + codec, err := api.negotiateCodec(req, resp) + if err != nil { + api.respondError(w, &apiError{errorNotAcceptable, err}, nil) + return + } + b, err := codec.Encode(resp) if err != nil { level.Error(api.logger).Log("msg", "error marshaling response", "err", err) @@ -1591,33 +1596,28 @@ func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface return } - w.Header().Set("Content-Type", codec.ContentType()) + w.Header().Set("Content-Type", codec.ContentType().String()) w.WriteHeader(http.StatusOK) if n, err := w.Write(b); err != nil { level.Error(api.logger).Log("msg", "error writing response", "bytesWritten", n, "err", err) } } -// HTTP content negotiation is hard (see https://developer.mozilla.org/en-US/docs/Web/HTTP/Content_negotiation). -// Ideally, we shouldn't be implementing this ourselves - https://github.com/golang/go/issues/19307 is an open proposal to add -// this to the Go stdlib and has links to a number of other implementations. -// -// This is an MVP, and doesn't support features like wildcards or weighting. 
-func (api *API) negotiateCodec(req *http.Request, resp *Response) Codec { - acceptHeader := req.Header.Get("Accept") - if acceptHeader == "" { - return defaultCodec - } - - for _, contentType := range strings.Split(acceptHeader, ",") { - codec, ok := api.codecs[strings.TrimSpace(contentType)] - if ok && codec.CanEncode(resp) { - return codec +func (api *API) negotiateCodec(req *http.Request, resp *Response) (Codec, error) { + for _, clause := range goautoneg.ParseAccept(req.Header.Get("Accept")) { + for _, codec := range api.codecs { + if codec.ContentType().Satisfies(clause) && codec.CanEncode(resp) { + return codec, nil + } } } - level.Debug(api.logger).Log("msg", "could not find suitable codec for response, falling back to default codec", "accept_header", acceptHeader) - return defaultCodec + defaultCodec := api.codecs[0] + if !defaultCodec.CanEncode(resp) { + return nil, fmt.Errorf("cannot encode response as %s", defaultCodec.ContentType()) + } + + return defaultCodec, nil } func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data interface{}) { @@ -1648,6 +1648,8 @@ func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data inter code = http.StatusInternalServerError case errorNotFound: code = http.StatusNotFound + case errorNotAcceptable: + code = http.StatusNotAcceptable default: code = http.StatusInternalServerError } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 0531c4fe5..90cf084ac 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -2769,9 +2769,11 @@ func TestRespondSuccess(t *testing.T) { logger: log.NewNopLogger(), } - api.InstallCodec(&testCodec{contentType: "test/cannot-encode", canEncode: false}) - api.InstallCodec(&testCodec{contentType: "test/can-encode", canEncode: true}) - api.InstallCodec(&testCodec{contentType: "test/can-encode-2", canEncode: true}) + api.ClearCodecs() + api.InstallCodec(JSONCodec{}) + api.InstallCodec(&testCodec{contentType: MIMEType{"test", 
"cannot-encode"}, canEncode: false}) + api.InstallCodec(&testCodec{contentType: MIMEType{"test", "can-encode"}, canEncode: true}) + api.InstallCodec(&testCodec{contentType: MIMEType{"test", "can-encode-2"}, canEncode: true}) s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { api.respond(w, r, "test", nil) @@ -2854,6 +2856,34 @@ func TestRespondSuccess(t *testing.T) { } } +func TestRespondSuccess_DefaultCodecCannotEncodeResponse(t *testing.T) { + api := API{ + logger: log.NewNopLogger(), + } + + api.ClearCodecs() + api.InstallCodec(&testCodec{contentType: MIMEType{"application", "default-format"}, canEncode: false}) + + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + api.respond(w, r, "test", nil) + })) + defer s.Close() + + req, err := http.NewRequest(http.MethodGet, s.URL, nil) + require.NoError(t, err) + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + + body, err := io.ReadAll(resp.Body) + defer resp.Body.Close() + require.NoError(t, err) + + require.Equal(t, http.StatusNotAcceptable, resp.StatusCode) + require.Equal(t, "application/json", resp.Header.Get("Content-Type")) + require.Equal(t, `{"status":"error","errorType":"not_acceptable","error":"cannot encode response as application/default-format"}`, string(body)) +} + func TestRespondError(t *testing.T) { s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { api := API{} @@ -3193,6 +3223,7 @@ func BenchmarkRespond(b *testing.B) { } b.ResetTimer() api := API{} + api.InstallCodec(JSONCodec{}) for n := 0; n < b.N; n++ { api.respond(&testResponseWriter, request, response, nil) } @@ -3307,11 +3338,11 @@ func TestGetGlobalURL(t *testing.T) { } type testCodec struct { - contentType string + contentType MIMEType canEncode bool } -func (t *testCodec) ContentType() string { +func (t *testCodec) ContentType() MIMEType { return t.contentType } diff --git a/web/api/v1/codec.go 
b/web/api/v1/codec.go index d11bb1fa0..492e00a74 100644 --- a/web/api/v1/codec.go +++ b/web/api/v1/codec.go @@ -13,10 +13,12 @@ package v1 +import "github.com/munnerz/goautoneg" + // A Codec performs encoding of API responses. type Codec interface { // ContentType returns the MIME time that this Codec emits. - ContentType() string + ContentType() MIMEType // CanEncode determines if this Codec can encode resp. CanEncode(resp *Response) bool @@ -24,3 +26,28 @@ type Codec interface { // Encode encodes resp, ready for transmission to an API consumer. Encode(resp *Response) ([]byte, error) } + +type MIMEType struct { + Type string + SubType string +} + +func (m MIMEType) String() string { + return m.Type + "/" + m.SubType +} + +func (m MIMEType) Satisfies(accept goautoneg.Accept) bool { + if accept.Type == "*" && accept.SubType == "*" { + return true + } + + if accept.Type == m.Type && accept.SubType == "*" { + return true + } + + if accept.Type == m.Type && accept.SubType == m.SubType { + return true + } + + return false +} diff --git a/web/api/v1/codec_test.go b/web/api/v1/codec_test.go new file mode 100644 index 000000000..911bf206e --- /dev/null +++ b/web/api/v1/codec_test.go @@ -0,0 +1,68 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1 + +import ( + "testing" + + "github.com/munnerz/goautoneg" + "github.com/stretchr/testify/require" +) + +func TestMIMEType_String(t *testing.T) { + m := MIMEType{Type: "application", SubType: "json"} + + require.Equal(t, "application/json", m.String()) +} + +func TestMIMEType_Satisfies(t *testing.T) { + m := MIMEType{Type: "application", SubType: "json"} + + scenarios := map[string]struct { + accept goautoneg.Accept + expected bool + }{ + "exact match": { + accept: goautoneg.Accept{Type: "application", SubType: "json"}, + expected: true, + }, + "sub-type wildcard match": { + accept: goautoneg.Accept{Type: "application", SubType: "*"}, + expected: true, + }, + "full wildcard match": { + accept: goautoneg.Accept{Type: "*", SubType: "*"}, + expected: true, + }, + "inverted": { + accept: goautoneg.Accept{Type: "json", SubType: "application"}, + expected: false, + }, + "inverted sub-type wildcard": { + accept: goautoneg.Accept{Type: "json", SubType: "*"}, + expected: false, + }, + "complete mismatch": { + accept: goautoneg.Accept{Type: "text", SubType: "plain"}, + expected: false, + }, + } + + for name, scenario := range scenarios { + t.Run(name, func(t *testing.T) { + actual := m.Satisfies(scenario.accept) + require.Equal(t, scenario.expected, actual) + }) + } +} diff --git a/web/api/v1/json_codec.go b/web/api/v1/json_codec.go index 455af717d..79ebfee18 100644 --- a/web/api/v1/json_codec.go +++ b/web/api/v1/json_codec.go @@ -34,8 +34,8 @@ func init() { // JSONCodec is a Codec that encodes API responses as JSON. 
type JSONCodec struct{} -func (j JSONCodec) ContentType() string { - return "application/json" +func (j JSONCodec) ContentType() MIMEType { + return MIMEType{Type: "application", SubType: "json"} } func (j JSONCodec) CanEncode(_ *Response) bool { From c9d06f2826bef4606e4a8c4edea259f012079f10 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Mon, 13 Mar 2023 11:15:45 +0530 Subject: [PATCH 023/632] tsdb: Replay m-map chunk only when required M-map chunks replayed on startup are discarded if there was no WAL and no snapshot loaded, because there is no series created in the Head that it can map to. So only load m-map chunks from disk if there is either a snapshot loaded or there is WAL on disk. Signed-off-by: Ganesh Vernekar --- tsdb/head.go | 44 ++++++++++++++++++++++++++++---------------- 1 file changed, 28 insertions(+), 16 deletions(-) diff --git a/tsdb/head.go b/tsdb/head.go index b28f5aca5..f3e0a7f6c 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -590,6 +590,7 @@ func (h *Head) Init(minValidTime int64) error { snapIdx, snapOffset := -1, 0 refSeries := make(map[chunks.HeadSeriesRef]*memSeries) + snapshotLoaded := false if h.opts.EnableMemorySnapshotOnShutdown { level.Info(h.logger).Log("msg", "Chunk snapshot is enabled, replaying from the snapshot") // If there are any WAL files, there should be at least one WAL file with an index that is current or newer @@ -619,6 +620,7 @@ func (h *Head) Init(minValidTime int64) error { var err error snapIdx, snapOffset, refSeries, err = h.loadChunkSnapshot() if err == nil { + snapshotLoaded = true level.Info(h.logger).Log("msg", "Chunk snapshot loading time", "duration", time.Since(start).String()) } if err != nil { @@ -636,26 +638,36 @@ func (h *Head) Init(minValidTime int64) error { } mmapChunkReplayStart := time.Now() - mmappedChunks, oooMmappedChunks, lastMmapRef, err := h.loadMmappedChunks(refSeries) - if err != nil { - // TODO(codesome): clear out all m-map chunks here for refSeries. 
- level.Error(h.logger).Log("msg", "Loading on-disk chunks failed", "err", err) - if _, ok := errors.Cause(err).(*chunks.CorruptionErr); ok { - h.metrics.mmapChunkCorruptionTotal.Inc() - } - - // Discard snapshot data since we need to replay the WAL for the missed m-map chunks data. - snapIdx, snapOffset = -1, 0 - - // If this fails, data will be recovered from WAL. - // Hence we wont lose any data (given WAL is not corrupt). - mmappedChunks, oooMmappedChunks, lastMmapRef, err = h.removeCorruptedMmappedChunks(err) + var ( + mmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk + oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk + lastMmapRef chunks.ChunkDiskMapperRef + err error + ) + if snapshotLoaded || h.wal != nil { + // If snapshot was not loaded and if there is no WAL, then m-map chunks will be discarded + // anyway. So we only load m-map chunks when it won't be discarded. + mmappedChunks, oooMmappedChunks, lastMmapRef, err = h.loadMmappedChunks(refSeries) if err != nil { - return err + // TODO(codesome): clear out all m-map chunks here for refSeries. + level.Error(h.logger).Log("msg", "Loading on-disk chunks failed", "err", err) + if _, ok := errors.Cause(err).(*chunks.CorruptionErr); ok { + h.metrics.mmapChunkCorruptionTotal.Inc() + } + + // Discard snapshot data since we need to replay the WAL for the missed m-map chunks data. + snapIdx, snapOffset = -1, 0 + + // If this fails, data will be recovered from WAL. + // Hence we wont lose any data (given WAL is not corrupt). 
+ mmappedChunks, oooMmappedChunks, lastMmapRef, err = h.removeCorruptedMmappedChunks(err) + if err != nil { + return err + } } + level.Info(h.logger).Log("msg", "On-disk memory mappable chunks replay completed", "duration", time.Since(mmapChunkReplayStart).String()) } - level.Info(h.logger).Log("msg", "On-disk memory mappable chunks replay completed", "duration", time.Since(mmapChunkReplayStart).String()) if h.wal == nil { level.Info(h.logger).Log("msg", "WAL not found") return nil From 1c3f1216b303aabbaac8f3cf86e186398d717c37 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Mon, 13 Mar 2023 12:23:57 +0530 Subject: [PATCH 024/632] tsdb: Test querying after missing wbl with snapshots enabled If the snapshot was enabled with some ooo mmap chunks on disk, and wbl was removed between restarts, then we should still be able to query the ooo mmap chunks after a restart. This test shows that we are not able to query those ooo mmap chunks after a restart under this situation. Signed-off-by: Ganesh Vernekar --- tsdb/db_test.go | 109 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 109 insertions(+) diff --git a/tsdb/db_test.go b/tsdb/db_test.go index cc65069e4..926af273d 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -4433,6 +4433,115 @@ func TestOOOCompactionWithDisabledWriteLog(t *testing.T) { verifySamples(db.Blocks()[1], 250, 350) } +// TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL tests the scenario where the WBL goes +// missing after a restart while snapshot was enabled, but the query still returns the right +// data from the mmap chunks. +func TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T) { + dir := t.TempDir() + + opts := DefaultOptions() + opts.OutOfOrderCapMax = 10 + opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds() + opts.EnableMemorySnapshotOnShutdown = true + + db, err := Open(dir, nil, nil, opts, nil) + require.NoError(t, err) + db.DisableCompactions() // We want to manually call it. 
+ t.Cleanup(func() { + require.NoError(t, db.Close()) + }) + + series1 := labels.FromStrings("foo", "bar1") + series2 := labels.FromStrings("foo", "bar2") + + addSamples := func(fromMins, toMins int64) { + app := db.Appender(context.Background()) + for min := fromMins; min <= toMins; min++ { + ts := min * time.Minute.Milliseconds() + _, err := app.Append(0, series1, ts, float64(ts)) + require.NoError(t, err) + _, err = app.Append(0, series2, ts, float64(2*ts)) + require.NoError(t, err) + } + require.NoError(t, app.Commit()) + } + + // Add an in-order samples. + addSamples(250, 350) + + // Add ooo samples that will result into a single block. + addSamples(90, 110) // The sample 110 will not be in m-map chunks. + + // Checking that there are some ooo m-map chunks. + for _, lbls := range []labels.Labels{series1, series2} { + ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls) + require.NoError(t, err) + require.False(t, created) + require.Equal(t, 2, len(ms.ooo.oooMmappedChunks)) + require.NotNil(t, ms.ooo.oooHeadChunk) + } + + // Restart DB. + require.NoError(t, db.Close()) + + // For some reason wbl goes missing. + require.NoError(t, os.RemoveAll(path.Join(dir, "wbl"))) + + db, err = Open(dir, nil, nil, opts, nil) + require.NoError(t, err) + db.DisableCompactions() // We want to manually call it. + + // Check ooo m-map chunks again. + for _, lbls := range []labels.Labels{series1, series2} { + ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls) + require.NoError(t, err) + require.False(t, created) + require.Equal(t, 2, len(ms.ooo.oooMmappedChunks)) + require.Equal(t, 109*time.Minute.Milliseconds(), ms.ooo.oooMmappedChunks[1].maxTime) + require.Nil(t, ms.ooo.oooHeadChunk) // Because of missing wbl. 
+ } + + verifySamples := func(fromMins, toMins int64) { + series1Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1) + series2Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1) + for min := fromMins; min <= toMins; min++ { + ts := min * time.Minute.Milliseconds() + series1Samples = append(series1Samples, sample{ts, float64(ts), nil, nil}) + series2Samples = append(series2Samples, sample{ts, float64(2 * ts), nil, nil}) + } + expRes := map[string][]tsdbutil.Sample{ + series1.String(): series1Samples, + series2.String(): series2Samples, + } + + q, err := db.Querier(context.Background(), fromMins*time.Minute.Milliseconds(), toMins*time.Minute.Milliseconds()) + require.NoError(t, err) + + actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*")) + require.Equal(t, expRes, actRes) + } + + // Checking for expected ooo data from mmap chunks. + verifySamples(90, 109) + + // Compaction should also work fine. + require.Equal(t, len(db.Blocks()), 0) + require.NoError(t, db.CompactOOOHead()) + require.Equal(t, len(db.Blocks()), 1) // One block from OOO data. + require.Equal(t, int64(0), db.Blocks()[0].MinTime()) + require.Equal(t, 120*time.Minute.Milliseconds(), db.Blocks()[0].MaxTime()) + + // Checking that ooo chunk is empty in Head. 
+ for _, lbls := range []labels.Labels{series1, series2} { + ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls) + require.NoError(t, err) + require.False(t, created) + require.Nil(t, ms.ooo) + } + + verifySamples(90, 109) +} + func Test_Querier_OOOQuery(t *testing.T) { opts := DefaultOptions() opts.OutOfOrderCapMax = 30 From 2af44f955871a18bd043f807441888aedd31dff7 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Mon, 13 Mar 2023 12:27:46 +0530 Subject: [PATCH 025/632] tsdb: Update OOO min/max time properly after replaying m-map chunks Without this fix, if snapshots were enabled, and wbl goes missing between restarts, then TSDB does not recognize that there are ooo mmap chunks on disk and we cannot query them until those chunks are compacted into blocks. Signed-off-by: Ganesh Vernekar --- tsdb/head.go | 1 + 1 file changed, 1 insertion(+) diff --git a/tsdb/head.go b/tsdb/head.go index f3e0a7f6c..41933d7a0 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -836,6 +836,7 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) numSamples: numSamples, }) + h.updateMinOOOMaxOOOTime(mint, maxt) return nil } From 9e5cc340c3d67fb8c1ff8d1e701e68de6859b268 Mon Sep 17 00:00:00 2001 From: Francis Begyn Date: Sat, 12 Nov 2022 20:19:33 +0100 Subject: [PATCH 026/632] stop github actions from sending me mail Currently github actions keep sending me mails about things that should only run on the prometheus organisation actions. This change makes sure to check who owns the repository before running the CI workflow. 
Signed-off-by: Francis Begyn --- .github/workflows/buf.yml | 1 + .github/workflows/lock.yml | 1 + .github/workflows/repo_sync.yml | 1 + 3 files changed, 3 insertions(+) diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index 2bfee7138..567ecc000 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -7,6 +7,7 @@ jobs: buf: name: lint and publish runs-on: ubuntu-latest + if: github.repository_owner == 'prometheus' steps: - uses: actions/checkout@v3 - uses: bufbuild/buf-setup-action@v1.13.1 diff --git a/.github/workflows/lock.yml b/.github/workflows/lock.yml index 1725fb4f2..c6ac3d74e 100644 --- a/.github/workflows/lock.yml +++ b/.github/workflows/lock.yml @@ -14,6 +14,7 @@ concurrency: jobs: action: runs-on: ubuntu-latest + if: github.repository_owner == 'prometheus' steps: - uses: dessant/lock-threads@v4 with: diff --git a/.github/workflows/repo_sync.yml b/.github/workflows/repo_sync.yml index 37be80d45..9526cd2fe 100644 --- a/.github/workflows/repo_sync.yml +++ b/.github/workflows/repo_sync.yml @@ -6,6 +6,7 @@ on: jobs: repo_sync: runs-on: ubuntu-latest + if: github.repository_owner == 'prometheus' container: image: quay.io/prometheus/golang-builder steps: From 1cc28ce9ca52453d6fc73a398158c4c135f90a47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20Scheibe?= Date: Sat, 18 Mar 2023 20:11:35 +0100 Subject: [PATCH 027/632] chore: Fix documentation on signal to shut down instance MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: René Scheibe --- docs/getting_started.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/getting_started.md b/docs/getting_started.md index 11d8d0fb8..e89ac705e 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -264,4 +264,4 @@ process ID. 
While Prometheus does have recovery mechanisms in the case that there is an abrupt process failure it is recommend to use the `SIGTERM` signal to cleanly shutdown a Prometheus instance. If you're running on Linux this can be performed -by using `kill -s SIGHUP `, replacing `` with your Prometheus process ID. +by using `kill -s SIGTERM `, replacing `` with your Prometheus process ID. From 97c7fffbb870a83625a7846e84e60c577d4575e4 Mon Sep 17 00:00:00 2001 From: Filip Petkovski Date: Wed, 22 Mar 2023 10:02:10 +0100 Subject: [PATCH 028/632] parser: Allow parsing arbitrary functions In Thanos we would like to start experimenting with custom functions that are currently not part of the PromQL spec. We would do this by adding an implementation for those functions in the Thanos engine: https://github.com/thanos-community/promql-engine and allow users to decide which engine they want to use on a per-query basis. Since we use the PromQL parser from Prometheus, injecting functions in the global `Functions` variable would mean they also become available for the Prometheus engine. To avoid this side-effect, this commit exposes a Parser interface in which the supported functions can be injected as an option. If not functions are injected, the parser implementation will default to the functions defined in the global Functions variable. Signed-off-by: Filip Petkovski --- promql/parser/functions.go | 4 +- promql/parser/generated_parser.y | 2 +- promql/parser/generated_parser.y.go | 2 +- promql/parser/parse.go | 117 ++++++++++++++++++---------- promql/parser/parse_test.go | 21 ++++- 5 files changed, 97 insertions(+), 49 deletions(-) diff --git a/promql/parser/functions.go b/promql/parser/functions.go index 450021328..479c7f635 100644 --- a/promql/parser/functions.go +++ b/promql/parser/functions.go @@ -387,7 +387,7 @@ var Functions = map[string]*Function{ } // getFunction returns a predefined Function object for the given name. 
-func getFunction(name string) (*Function, bool) { - function, ok := Functions[name] +func getFunction(name string, functions map[string]*Function) (*Function, bool) { + function, ok := functions[name] return function, ok } diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index 461e854ac..fe3adfb9a 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -339,7 +339,7 @@ grouping_label : maybe_label function_call : IDENTIFIER function_call_body { - fn, exist := getFunction($1.Val) + fn, exist := getFunction($1.Val, yylex.(*parser).functions) if !exist{ yylex.(*parser).addParseErrf($1.PositionRange(),"unknown function with name %q", $1.Val) } diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index 3a0e8bf69..1ac95036a 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -1210,7 +1210,7 @@ yydefault: yyDollar = yyS[yypt-2 : yypt+1] //line promql/parser/generated_parser.y:341 { - fn, exist := getFunction(yyDollar[1].item.Val) + fn, exist := getFunction(yyDollar[1].item.Val, yylex.(*parser).functions) if !exist { yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "unknown function with name %q", yyDollar[1].item.Val) } diff --git a/promql/parser/parse.go b/promql/parser/parse.go index 6c37ce6fc..7e7ec71f5 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -37,12 +37,20 @@ var parserPool = sync.Pool{ }, } +type Parser interface { + ParseExpr() (Expr, error) + Close() +} + type parser struct { lex Lexer inject ItemType injecting bool + // functions contains all functions supported by the parser instance. + functions map[string]*Function + // Everytime an Item is lexed that could be the end // of certain expressions its end position is stored here. 
lastClosing Pos @@ -53,6 +61,62 @@ type parser struct { parseErrors ParseErrors } +type Opt func(p *parser) + +func WithFunctions(functions map[string]*Function) Opt { + return func(p *parser) { + p.functions = functions + } +} + +// NewParser returns a new parser. +func NewParser(input string, opts ...Opt) *parser { + p := parserPool.Get().(*parser) + + p.functions = Functions + p.injecting = false + p.parseErrors = nil + p.generatedParserResult = nil + + // Clear lexer struct before reusing. + p.lex = Lexer{ + input: input, + state: lexStatements, + } + + // Apply user define options. + for _, opt := range opts { + opt(p) + } + + return p +} + +func (p *parser) ParseExpr() (expr Expr, err error) { + defer p.recover(&err) + + parseResult := p.parseGenerated(START_EXPRESSION) + + if parseResult != nil { + expr = parseResult.(Expr) + } + + // Only typecheck when there are no syntax errors. + if len(p.parseErrors) == 0 { + p.checkAST(expr) + } + + if len(p.parseErrors) != 0 { + err = p.parseErrors + } + + return expr, err +} + +func (p *parser) Close() { + defer parserPool.Put(p) +} + // ParseErr wraps a parsing error with line and position context. type ParseErr struct { PositionRange PositionRange @@ -105,32 +169,15 @@ func (errs ParseErrors) Error() string { // ParseExpr returns the expression parsed from the input. func ParseExpr(input string) (expr Expr, err error) { - p := newParser(input) - defer parserPool.Put(p) - defer p.recover(&err) - - parseResult := p.parseGenerated(START_EXPRESSION) - - if parseResult != nil { - expr = parseResult.(Expr) - } - - // Only typecheck when there are no syntax errors. 
- if len(p.parseErrors) == 0 { - p.checkAST(expr) - } - - if len(p.parseErrors) != 0 { - err = p.parseErrors - } - - return expr, err + p := NewParser(input) + defer p.Close() + return p.ParseExpr() } // ParseMetric parses the input into a metric func ParseMetric(input string) (m labels.Labels, err error) { - p := newParser(input) - defer parserPool.Put(p) + p := NewParser(input) + defer p.Close() defer p.recover(&err) parseResult := p.parseGenerated(START_METRIC) @@ -148,8 +195,8 @@ func ParseMetric(input string) (m labels.Labels, err error) { // ParseMetricSelector parses the provided textual metric selector into a list of // label matchers. func ParseMetricSelector(input string) (m []*labels.Matcher, err error) { - p := newParser(input) - defer parserPool.Put(p) + p := NewParser(input) + defer p.Close() defer p.recover(&err) parseResult := p.parseGenerated(START_METRIC_SELECTOR) @@ -164,22 +211,6 @@ func ParseMetricSelector(input string) (m []*labels.Matcher, err error) { return m, err } -// newParser returns a new parser. -func newParser(input string) *parser { - p := parserPool.Get().(*parser) - - p.injecting = false - p.parseErrors = nil - p.generatedParserResult = nil - - // Clear lexer struct before reusing. - p.lex = Lexer{ - input: input, - state: lexStatements, - } - return p -} - // SequenceValue is an omittable value in a sequence of time series values. type SequenceValue struct { Value float64 @@ -200,10 +231,10 @@ type seriesDescription struct { // ParseSeriesDesc parses the description of a time series. 
func ParseSeriesDesc(input string) (labels labels.Labels, values []SequenceValue, err error) { - p := newParser(input) + p := NewParser(input) p.lex.seriesDesc = true - defer parserPool.Put(p) + defer p.Close() defer p.recover(&err) parseResult := p.parseGenerated(START_SERIES_DESCRIPTION) @@ -799,7 +830,7 @@ func MustLabelMatcher(mt labels.MatchType, name, val string) *labels.Matcher { } func MustGetFunction(name string) *Function { - f, ok := getFunction(name) + f, ok := getFunction(name, Functions) if !ok { panic(fmt.Errorf("function %q does not exist", name)) } diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index df66d9381..ce609a4ee 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -3714,7 +3714,7 @@ func TestParseSeries(t *testing.T) { } func TestRecoverParserRuntime(t *testing.T) { - p := newParser("foo bar") + p := NewParser("foo bar") var err error defer func() { @@ -3728,7 +3728,7 @@ func TestRecoverParserRuntime(t *testing.T) { } func TestRecoverParserError(t *testing.T) { - p := newParser("foo bar") + p := NewParser("foo bar") var err error e := errors.New("custom error") @@ -3776,3 +3776,20 @@ func TestExtractSelectors(t *testing.T) { require.Equal(t, expected, ExtractSelectors(expr)) } } + +func TestParseCustomFunctions(t *testing.T) { + funcs := Functions + funcs["custom_func"] = &Function{ + Name: "custom_func", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + } + input := "custom_func(metric[1m])" + p := NewParser(input, WithFunctions(funcs)) + expr, err := p.ParseExpr() + require.NoError(t, err) + + call, ok := expr.(*Call) + require.True(t, ok) + require.Equal(t, "custom_func", call.Func.Name) +} From 3d7783e6633dcfd79078ac30e100873c6f53fca3 Mon Sep 17 00:00:00 2001 From: Filip Petkovski Date: Wed, 22 Mar 2023 10:20:16 +0100 Subject: [PATCH 029/632] Add nolint for NewParser function Signed-off-by: Filip Petkovski --- promql/parser/parse.go | 1 + 1 file changed, 1 
insertion(+) diff --git a/promql/parser/parse.go b/promql/parser/parse.go index 7e7ec71f5..ad8890f29 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -70,6 +70,7 @@ func WithFunctions(functions map[string]*Function) Opt { } // NewParser returns a new parser. +// nolint:revive func NewParser(input string, opts ...Opt) *parser { p := parserPool.Get().(*parser) From b987afa7ef948461100b914fb6aea5e7195fd078 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 22 Mar 2023 15:46:02 +0000 Subject: [PATCH 030/632] labels: simplify call to get Labels from Builder It took a `Labels` where the memory could be re-used, but in practice this hardly ever benefitted. Especially after converting `relabel.Process` to `relabel.ProcessBuilder`. Comparing the parameter to `nil` was a bug; `EmptyLabels` is not `nil` so the slice was reallocated multiple times by `append`. Lastly `Builder.Labels()` now estimates that the final size will depend on labels added and deleted. Signed-off-by: Bryan Boreham --- cmd/promtool/rules.go | 2 +- model/labels/labels.go | 16 +++------------- model/labels/labels_string.go | 9 ++++----- model/labels/labels_test.go | 10 +++++----- model/relabel/relabel.go | 2 +- notifier/notifier.go | 2 +- promql/engine.go | 10 +++++----- promql/functions.go | 8 ++++---- promql/parser/generated_parser.y | 2 +- promql/parser/generated_parser.y.go | 2 +- rules/alerting.go | 6 +++--- rules/recording.go | 2 +- scrape/scrape.go | 4 ++-- scrape/target.go | 4 ++-- scrape/target_test.go | 2 +- storage/remote/read.go | 2 +- tsdb/compact_test.go | 6 +++--- 17 files changed, 39 insertions(+), 50 deletions(-) diff --git a/cmd/promtool/rules.go b/cmd/promtool/rules.go index 2b5ed1d78..aedc7bcb9 100644 --- a/cmd/promtool/rules.go +++ b/cmd/promtool/rules.go @@ -163,7 +163,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName }) lb.Set(labels.MetricName, ruleName) - lbls := lb.Labels(labels.EmptyLabels()) + lbls := lb.Labels() for _, 
value := range sample.Values { if err := app.add(ctx, lbls, timestamp.FromTime(value.Timestamp.Time()), float64(value.Value)); err != nil { diff --git a/model/labels/labels.go b/model/labels/labels.go index 6de001c3c..4e87eb9b3 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -570,24 +570,14 @@ func contains(s []Label, n string) bool { return false } -// Labels returns the labels from the builder, adding them to res if non-nil. -// Argument res can be the same as b.base, if caller wants to overwrite that slice. +// Labels returns the labels from the builder. // If no modifications were made, the original labels are returned. -func (b *Builder) Labels(res Labels) Labels { +func (b *Builder) Labels() Labels { if len(b.del) == 0 && len(b.add) == 0 { return b.base } - if res == nil { - // In the general case, labels are removed, modified or moved - // rather than added. - res = make(Labels, 0, len(b.base)) - } else { - res = res[:0] - } - // Justification that res can be the same slice as base: in this loop - // we move forward through base, and either skip an element or assign - // it to res at its current position or an earlier position. + res := make(Labels, 0, len(b.base)-len(b.del)+len(b.add)) for _, l := range b.base { if slices.Contains(b.del, l.Name) || contains(b.add, l.Name) { continue diff --git a/model/labels/labels_string.go b/model/labels/labels_string.go index 98db29d25..9badf667c 100644 --- a/model/labels/labels_string.go +++ b/model/labels/labels_string.go @@ -158,7 +158,7 @@ func (ls Labels) MatchLabels(on bool, names ...string) Labels { b.Del(MetricName) b.Del(names...) } - return b.Labels(EmptyLabels()) + return b.Labels() } // Hash returns a hash value for the label set. @@ -624,10 +624,9 @@ func contains(s []Label, n string) bool { return false } -// Labels returns the labels from the builder, adding them to res if non-nil. -// Argument res can be the same as b.base, if caller wants to overwrite that slice. 
+// Labels returns the labels from the builder. // If no modifications were made, the original labels are returned. -func (b *Builder) Labels(res Labels) Labels { +func (b *Builder) Labels() Labels { if len(b.del) == 0 && len(b.add) == 0 { return b.base } @@ -637,7 +636,7 @@ func (b *Builder) Labels(res Labels) Labels { a, d := 0, 0 bufSize := len(b.base.data) + labelsSize(b.add) - buf := make([]byte, 0, bufSize) // TODO: see if we can re-use the buffer from res. + buf := make([]byte, 0, bufSize) for pos := 0; pos < len(b.base.data); { oldPos := pos var lName string diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index 588a84b98..9e60c2251 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -596,7 +596,7 @@ func TestBuilder(t *testing.T) { b.Keep(tcase.keep...) } b.Del(tcase.del...) - require.Equal(t, tcase.want, b.Labels(EmptyLabels())) + require.Equal(t, tcase.want, b.Labels()) // Check what happens when we call Range and mutate the builder. b.Range(func(l Label) { @@ -604,7 +604,7 @@ func TestBuilder(t *testing.T) { b.Del(l.Name) } }) - require.Equal(t, tcase.want.BytesWithoutLabels(nil, "aaa", "bbb"), b.Labels(tcase.base).Bytes(nil)) + require.Equal(t, tcase.want.BytesWithoutLabels(nil, "aaa", "bbb"), b.Labels().Bytes(nil)) }) } } @@ -669,7 +669,7 @@ func BenchmarkLabels_Hash(b *testing.B) { // Label ~20B name, 50B value. b.Set(fmt.Sprintf("abcdefghijabcdefghijabcdefghij%d", i), fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)) } - return b.Labels(EmptyLabels()) + return b.Labels() }(), }, { @@ -680,7 +680,7 @@ func BenchmarkLabels_Hash(b *testing.B) { // Label ~50B name, 50B value. 
b.Set(fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i), fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)) } - return b.Labels(EmptyLabels()) + return b.Labels() }(), }, { @@ -729,7 +729,7 @@ func BenchmarkBuilder(b *testing.B) { for _, l := range m { builder.Set(l.Name, l.Value) } - l = builder.Labels(EmptyLabels()) + l = builder.Labels() } require.Equal(b, 9, l.Len()) } diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index 5ef79b4a7..5027c3963 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -211,7 +211,7 @@ func Process(lbls labels.Labels, cfgs ...*Config) (ret labels.Labels, keep bool) if !ProcessBuilder(lb, cfgs...) { return labels.EmptyLabels(), false } - return lb.Labels(lbls), true + return lb.Labels(), true } // ProcessBuilder is like Process, but the caller passes a labels.Builder diff --git a/notifier/notifier.go b/notifier/notifier.go index 79697d079..c3b2e5c7e 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -359,7 +359,7 @@ func (n *Manager) Send(alerts ...*Alert) { } }) - a.Labels = lb.Labels(a.Labels) + a.Labels = lb.Labels() } alerts = n.relabelAlerts(alerts) diff --git a/promql/engine.go b/promql/engine.go index ddfb26b13..5682ea0f5 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -2193,7 +2193,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V } } - ret := enh.lb.Labels(labels.EmptyLabels()) + ret := enh.lb.Labels() enh.resultMetric[str] = ret return ret } @@ -2233,7 +2233,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala } func dropMetricName(l labels.Labels) labels.Labels { - return labels.NewBuilder(l).Del(labels.MetricName).Labels(labels.EmptyLabels()) + return labels.NewBuilder(l).Del(labels.MetricName).Labels() } // scalarBinop evaluates a binary operation between two Scalars. 
@@ -2367,7 +2367,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without if op == parser.COUNT_VALUES { enh.resetBuilder(metric) enh.lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64)) - metric = enh.lb.Labels(labels.EmptyLabels()) + metric = enh.lb.Labels() // We've changed the metric so we have to recompute the grouping key. recomputeGroupingKey = true @@ -2389,10 +2389,10 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without if without { enh.lb.Del(grouping...) enh.lb.Del(labels.MetricName) - m = enh.lb.Labels(labels.EmptyLabels()) + m = enh.lb.Labels() } else if len(grouping) > 0 { enh.lb.Keep(grouping...) - m = enh.lb.Labels(labels.EmptyLabels()) + m = enh.lb.Labels() } else { m = labels.EmptyLabels() } diff --git a/promql/functions.go b/promql/functions.go index 3da38ea0f..e1a02ac8e 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -960,7 +960,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev if !ok { sample.Metric = labels.NewBuilder(sample.Metric). Del(excludedLabels...). 
- Labels(labels.EmptyLabels()) + Labels() mb = &metricWithBuckets{sample.Metric, nil} enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb @@ -1080,7 +1080,7 @@ func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNod if len(res) > 0 { lb.Set(dst, string(res)) } - outMetric = lb.Labels(labels.EmptyLabels()) + outMetric = lb.Labels() enh.Dmn[h] = outMetric } } @@ -1148,7 +1148,7 @@ func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe lb.Set(dst, strval) } - outMetric = lb.Labels(labels.EmptyLabels()) + outMetric = lb.Labels() enh.Dmn[h] = outMetric } @@ -1414,7 +1414,7 @@ func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels { } } - return b.Labels(labels.EmptyLabels()) + return b.Labels() } func stringFromArg(e parser.Expr) string { diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index 461e854ac..b1c604eec 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -567,7 +567,7 @@ label_matcher : IDENTIFIER match_op STRING */ metric : metric_identifier label_set - { b := labels.NewBuilder($2); b.Set(labels.MetricName, $1.Val); $$ = b.Labels(labels.EmptyLabels()) } + { b := labels.NewBuilder($2); b.Set(labels.MetricName, $1.Val); $$ = b.Labels() } | label_set {$$ = $1} ; diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index 3a0e8bf69..2cf3e06b9 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -1494,7 +1494,7 @@ yydefault: { b := labels.NewBuilder(yyDollar[2].labels) b.Set(labels.MetricName, yyDollar[1].item.Val) - yyVAL.labels = b.Labels(labels.EmptyLabels()) + yyVAL.labels = b.Labels() } case 96: yyDollar = yyS[yypt-1 : yypt+1] diff --git a/rules/alerting.go b/rules/alerting.go index 70e2241f7..c793b90cb 100644 --- a/rules/alerting.go +++ b/rules/alerting.go @@ -234,7 +234,7 @@ func (r *AlertingRule) sample(alert *Alert, ts time.Time) promql.Sample { 
lb.Set(alertStateLabel, alert.State.String()) s := promql.Sample{ - Metric: lb.Labels(labels.EmptyLabels()), + Metric: lb.Labels(), Point: promql.Point{T: timestamp.FromTime(ts), V: 1}, } return s @@ -252,7 +252,7 @@ func (r *AlertingRule) forStateSample(alert *Alert, ts time.Time, v float64) pro lb.Set(labels.AlertName, r.name) s := promql.Sample{ - Metric: lb.Labels(labels.EmptyLabels()), + Metric: lb.Labels(), Point: promql.Point{T: timestamp.FromTime(ts), V: v}, } return s @@ -381,7 +381,7 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, }) annotations := sb.Labels() - lbs := lb.Labels(labels.EmptyLabels()) + lbs := lb.Labels() h := lbs.Hash() resultFPs[h] = struct{}{} diff --git a/rules/recording.go b/rules/recording.go index a07c779bb..b6a886cdd 100644 --- a/rules/recording.go +++ b/rules/recording.go @@ -93,7 +93,7 @@ func (rule *RecordingRule) Eval(ctx context.Context, ts time.Time, query QueryFu lb.Set(l.Name, l.Value) }) - sample.Metric = lb.Labels(sample.Metric) + sample.Metric = lb.Labels() } // Check that the rule does not produce identical metrics after applying diff --git a/scrape/scrape.go b/scrape/scrape.go index 01c66ca81..f38527ff3 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -692,7 +692,7 @@ func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*re } } - res := lb.Labels(labels.EmptyLabels()) + res := lb.Labels() if len(rc) > 0 { res, _ = relabel.Process(res, rc...) @@ -726,7 +726,7 @@ func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels lb.Set(l.Name, l.Value) }) - return lb.Labels(labels.EmptyLabels()) + return lb.Labels() } // appender returns an appender for ingested samples from the target. 
diff --git a/scrape/target.go b/scrape/target.go index 59f6e2873..f250910c1 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -380,7 +380,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort } } - preRelabelLabels := lb.Labels(labels.EmptyLabels()) + preRelabelLabels := lb.Labels() keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...) // Check if the target was dropped. @@ -476,7 +476,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort lb.Set(model.InstanceLabel, addr) } - res = lb.Labels(labels.EmptyLabels()) + res = lb.Labels() err = res.Validate(func(l labels.Label) error { // Check label values are valid, drop the target if not. if !model.LabelValue(l.Value).IsValid() { diff --git a/scrape/target_test.go b/scrape/target_test.go index 4937359ed..2bc3f000c 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -140,7 +140,7 @@ func newTestTarget(targetURL string, deadline time.Duration, lbls labels.Labels) lb.Set(model.AddressLabel, strings.TrimPrefix(targetURL, "http://")) lb.Set(model.MetricsPathLabel, "/metrics") - return &Target{labels: lb.Labels(labels.EmptyLabels())} + return &Target{labels: lb.Labels()} } func TestNewHTTPBearerToken(t *testing.T) { diff --git a/storage/remote/read.go b/storage/remote/read.go index 21524d70d..af61334f4 100644 --- a/storage/remote/read.go +++ b/storage/remote/read.go @@ -278,5 +278,5 @@ func (sf seriesFilter) Labels() labels.Labels { b := labels.NewBuilder(sf.Series.Labels()) // todo: check if this is too inefficient. b.Del(sf.toFilter...) 
- return b.Labels(labels.EmptyLabels()) + return b.Labels() } diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index a3b99f87a..cb5aa0122 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -1569,20 +1569,20 @@ func TestSparseHistogramSpaceSavings(t *testing.T) { for it.Next() { numOldSeriesPerHistogram++ b := it.At() - lbls := labels.NewBuilder(ah.baseLabels).Set("le", fmt.Sprintf("%.16f", b.Upper)).Labels(labels.EmptyLabels()) + lbls := labels.NewBuilder(ah.baseLabels).Set("le", fmt.Sprintf("%.16f", b.Upper)).Labels() refs[itIdx], err = oldApp.Append(refs[itIdx], lbls, ts, float64(b.Count)) require.NoError(t, err) itIdx++ } baseName := ah.baseLabels.Get(labels.MetricName) // _count metric. - countLbls := labels.NewBuilder(ah.baseLabels).Set(labels.MetricName, baseName+"_count").Labels(labels.EmptyLabels()) + countLbls := labels.NewBuilder(ah.baseLabels).Set(labels.MetricName, baseName+"_count").Labels() _, err = oldApp.Append(0, countLbls, ts, float64(h.Count)) require.NoError(t, err) numOldSeriesPerHistogram++ // _sum metric. - sumLbls := labels.NewBuilder(ah.baseLabels).Set(labels.MetricName, baseName+"_sum").Labels(labels.EmptyLabels()) + sumLbls := labels.NewBuilder(ah.baseLabels).Set(labels.MetricName, baseName+"_sum").Labels() _, err = oldApp.Append(0, sumLbls, ts, h.Sum) require.NoError(t, err) numOldSeriesPerHistogram++ From b73040cff6c81e10d759cbd2ea23c8484689565c Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Thu, 23 Mar 2023 00:01:34 +0100 Subject: [PATCH 031/632] Fix changelog Signed-off-by: Julien Pivotto --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 13bdde031..6e6b30f47 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,7 @@ the gains on their production architecture. We are providing release artefacts improvements for testing. #10991 * [FEATURE] Promtool: Add HTTP client configuration to query commands. 
#11487 -* [FEATURE] Scrape: Add `include_scrape_configs` to include scrape configs from different files. #12019 +* [FEATURE] Scrape: Add `scrape_config_files` to include scrape configs from different files. #12019 * [FEATURE] HTTP client: Add `no_proxy` to exclude URLs from proxied requests. #12098 * [FEATURE] HTTP client: Add `proxy_from_enviroment` to read proxies from env variables. #12098 * [ENHANCEMENT] API: Add support for setting lookback delta per query via the API. #12088 From 2fc2e233a68303af8d23df0e95e8e546d20e9123 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 31 Mar 2023 08:47:06 +0000 Subject: [PATCH 032/632] remote-write: raise default samples per send to 2,000 Larger messages cost less, because the per-message overheads at sender and receiver are spread over more samples. Example: scraping 1.5 million series every 15 seconds will send 50 messages per second. Previous default was 500. Signed-off-by: Bryan Boreham --- config/config.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/config/config.go b/config/config.go index a29c98eed..9b0f4ad1e 100644 --- a/config/config.go +++ b/config/config.go @@ -174,10 +174,10 @@ var ( // DefaultQueueConfig is the default remote queue configuration. DefaultQueueConfig = QueueConfig{ // With a maximum of 200 shards, assuming an average of 100ms remote write - // time and 500 samples per batch, we will be able to push 1M samples/s. + // time and 2000 samples per batch, we will be able to push 4M samples/s. MaxShards: 200, MinShards: 1, - MaxSamplesPerSend: 500, + MaxSamplesPerSend: 2000, // Each shard will have a max of 2500 samples pending in its channel, plus the pending // samples that have been enqueued. 
Theoretically we should only ever have about 3000 samples @@ -194,7 +194,7 @@ var ( DefaultMetadataConfig = MetadataConfig{ Send: true, SendInterval: model.Duration(1 * time.Minute), - MaxSamplesPerSend: 500, + MaxSamplesPerSend: 2000, } // DefaultRemoteReadConfig is the default remote read configuration. From 2bd2a6006dc9050cdf85e7f76653a32e0a14af64 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Apr 2023 23:57:09 +0000 Subject: [PATCH 033/632] build(deps): bump actions/cache from 3.2.4 to 3.3.1 Bumps [actions/cache](https://github.com/actions/cache) from 3.2.4 to 3.3.1. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/v3.2.4...v3.3.1) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ceb374c8c..a1497e560 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -197,7 +197,7 @@ jobs: with: node-version-file: "web/ui/.nvmrc" registry-url: "https://registry.npmjs.org" - - uses: actions/cache@v3.2.4 + - uses: actions/cache@v3.3.1 with: path: ~/.npm key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }} From e15b0a29acaa9709c85230ed8b06619251d8179b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Apr 2023 23:57:17 +0000 Subject: [PATCH 034/632] build(deps): bump actions/setup-go from 3 to 4 Bumps [actions/setup-go](https://github.com/actions/setup-go) from 3 to 4. 
- [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/ci.yml | 4 ++-- .github/workflows/codeql-analysis.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ceb374c8c..8294d0756 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -52,7 +52,7 @@ jobs: runs-on: windows-latest steps: - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v4 with: go-version: '>=1.20 <1.21' - run: | @@ -139,7 +139,7 @@ jobs: - name: Checkout repository uses: actions/checkout@v3 - name: Install Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: go-version: 1.20.x - name: Install snmp_exporter/generator dependencies diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 43c977857..6036e80ae 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -21,7 +21,7 @@ jobs: steps: - name: Checkout repository uses: actions/checkout@v3 - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v4 with: go-version: '>=1.20 <1.21' From a9af885de26aee57ed664c8708e15676f1f4cefe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Apr 2023 23:57:22 +0000 Subject: [PATCH 035/632] build(deps): bump prometheus/promci from 0.0.2 to 0.1.0 Bumps [prometheus/promci](https://github.com/prometheus/promci) from 0.0.2 to 0.1.0. 
- [Release notes](https://github.com/prometheus/promci/releases) - [Commits](https://github.com/prometheus/promci/compare/v0.0.2...v0.1.0) --- updated-dependencies: - dependency-name: prometheus/promci dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/ci.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ceb374c8c..4af85cc55 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,7 +13,7 @@ jobs: image: quay.io/prometheus/golang-builder:1.20-base steps: - uses: actions/checkout@v3 - - uses: prometheus/promci@v0.0.2 + - uses: prometheus/promci@v0.1.0 - uses: ./.github/promci/actions/setup_environment - run: make GO_ONLY=1 SKIP_GOLANGCI_LINT=1 - run: go test ./tsdb/ -test.tsdb-isolation=false @@ -35,7 +35,7 @@ jobs: steps: - uses: actions/checkout@v3 - - uses: prometheus/promci@v0.0.2 + - uses: prometheus/promci@v0.1.0 - uses: ./.github/promci/actions/setup_environment with: enable_go: false @@ -104,7 +104,7 @@ jobs: thread: [ 0, 1, 2 ] steps: - uses: actions/checkout@v3 - - uses: prometheus/promci@v0.0.2 + - uses: prometheus/promci@v0.1.0 - uses: ./.github/promci/actions/build with: promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386" @@ -127,7 +127,7 @@ jobs: # should also be updated. 
steps: - uses: actions/checkout@v3 - - uses: prometheus/promci@v0.0.2 + - uses: prometheus/promci@v0.1.0 - uses: ./.github/promci/actions/build with: parallelism: 12 @@ -162,7 +162,7 @@ jobs: if: github.event_name == 'push' && github.event.ref == 'refs/heads/main' steps: - uses: actions/checkout@v3 - - uses: prometheus/promci@v0.0.2 + - uses: prometheus/promci@v0.1.0 - uses: ./.github/promci/actions/publish_main with: docker_hub_login: ${{ secrets.docker_hub_login }} @@ -176,7 +176,7 @@ jobs: if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.') steps: - uses: actions/checkout@v3 - - uses: prometheus/promci@v0.0.2 + - uses: prometheus/promci@v0.1.0 - uses: ./.github/promci/actions/publish_release with: docker_hub_login: ${{ secrets.docker_hub_login }} @@ -191,7 +191,7 @@ jobs: steps: - name: Checkout uses: actions/checkout@v3 - - uses: prometheus/promci@v0.0.2 + - uses: prometheus/promci@v0.1.0 - name: Install nodejs uses: actions/setup-node@v3 with: From babdbd8add4e5efc6b457854ee2e999f9c121482 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Apr 2023 23:59:48 +0000 Subject: [PATCH 036/632] build(deps): bump github.com/ionos-cloud/sdk-go/v6 from 6.1.4 to 6.1.5 Bumps [github.com/ionos-cloud/sdk-go/v6](https://github.com/ionos-cloud/sdk-go) from 6.1.4 to 6.1.5. - [Release notes](https://github.com/ionos-cloud/sdk-go/releases) - [Changelog](https://github.com/ionos-cloud/sdk-go/blob/master/docs/CHANGELOG.md) - [Commits](https://github.com/ionos-cloud/sdk-go/compare/v6.1.4...v6.1.5) --- updated-dependencies: - dependency-name: github.com/ionos-cloud/sdk-go/v6 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4b672b862..2075fd891 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,7 @@ require ( github.com/hashicorp/consul/api v1.20.0 github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b github.com/hetznercloud/hcloud-go v1.41.0 - github.com/ionos-cloud/sdk-go/v6 v6.1.4 + github.com/ionos-cloud/sdk-go/v6 v6.1.5 github.com/json-iterator/go v1.1.12 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b github.com/linode/linodego v1.14.1 diff --git a/go.sum b/go.sum index 241499158..da2a66336 100644 --- a/go.sum +++ b/go.sum @@ -464,8 +464,8 @@ github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/ionos-cloud/sdk-go/v6 v6.1.4 h1:BJHhFA8Q1SZC7VOXqKKr2BV2ysQ2/4hlk1e4hZte7GY= -github.com/ionos-cloud/sdk-go/v6 v6.1.4/go.mod h1:Ox3W0iiEz0GHnfY9e5LmAxwklsxguuNFEUSu0gVRTME= +github.com/ionos-cloud/sdk-go/v6 v6.1.5 h1:BFqThLOgrGJWeo7w6UDyYuNxyi/GqEmNPl7C/YcQ8Fw= +github.com/ionos-cloud/sdk-go/v6 v6.1.5/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= From bf988304d77c64241255ca43ec4e05362cdaf1be Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Apr 2023 23:59:59 +0000 Subject: [PATCH 037/632] build(deps): bump 
github.com/digitalocean/godo from 1.97.0 to 1.98.0 Bumps [github.com/digitalocean/godo](https://github.com/digitalocean/godo) from 1.97.0 to 1.98.0. - [Release notes](https://github.com/digitalocean/godo/releases) - [Changelog](https://github.com/digitalocean/godo/blob/main/CHANGELOG.md) - [Commits](https://github.com/digitalocean/godo/compare/v1.97.0...v1.98.0) --- updated-dependencies: - dependency-name: github.com/digitalocean/godo dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4b672b862..f0968cd63 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/aws/aws-sdk-go v1.44.217 github.com/cespare/xxhash/v2 v2.2.0 github.com/dennwc/varint v1.0.0 - github.com/digitalocean/godo v1.97.0 + github.com/digitalocean/godo v1.98.0 github.com/docker/docker v23.0.1+incompatible github.com/edsrzf/mmap-go v1.1.0 github.com/envoyproxy/go-control-plane v0.11.0 diff --git a/go.sum b/go.sum index 241499158..9ed1279b9 100644 --- a/go.sum +++ b/go.sum @@ -149,8 +149,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/digitalocean/godo v1.97.0 h1:p9w1yCcWMZcxFSLPToNGXA96WfUVLXqoHti6GzVomL4= -github.com/digitalocean/godo v1.97.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA= +github.com/digitalocean/godo v1.98.0 h1:potyC1eD0N9n5/P4/WmJuKgg+OGYZOBWEW+/aKTX6QQ= +github.com/digitalocean/godo v1.98.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docker/distribution 
v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= From 6f2507656c97937a3a9856d3c94945569cb009f7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 2 Apr 2023 00:00:12 +0000 Subject: [PATCH 038/632] build(deps): bump github.com/miekg/dns from 1.1.51 to 1.1.53 Bumps [github.com/miekg/dns](https://github.com/miekg/dns) from 1.1.51 to 1.1.53. - [Release notes](https://github.com/miekg/dns/releases) - [Changelog](https://github.com/miekg/dns/blob/master/Makefile.release) - [Commits](https://github.com/miekg/dns/compare/v1.1.51...v1.1.53) --- updated-dependencies: - dependency-name: github.com/miekg/dns dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 9 ++------- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 4b672b862..c01057029 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b github.com/linode/linodego v1.14.1 - github.com/miekg/dns v1.1.51 + github.com/miekg/dns v1.1.53 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/oklog/run v1.1.0 github.com/oklog/ulid v1.3.1 diff --git a/go.sum b/go.sum index 241499158..1e23176be 100644 --- a/go.sum +++ b/go.sum @@ -546,8 +546,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfr github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.51 h1:0+Xg7vObnhrz/4ZCZcZh7zPXlmU0aveS2HDBd0m0qSo= -github.com/miekg/dns v1.1.51/go.mod 
h1:2Z9d3CP1LQWihRZUf29mQ19yDThaI4DAYzte2CaQW5c= +github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw= +github.com/miekg/dns v1.1.53/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -861,7 +861,6 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -910,7 +909,6 @@ golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1002,13 +1000,11 @@ golang.org/x/sys 
v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1081,7 +1077,6 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 
0b7cf5b368326658baec5fa789895121472e7fd2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 2 Apr 2023 00:00:56 +0000 Subject: [PATCH 039/632] build(deps): bump google.golang.org/api from 0.111.0 to 0.114.0 Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.111.0 to 0.114.0. - [Release notes](https://github.com/googleapis/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.111.0...v0.114.0) --- updated-dependencies: - dependency-name: google.golang.org/api dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 4b672b862..496e7f062 100644 --- a/go.mod +++ b/go.mod @@ -67,10 +67,10 @@ require ( golang.org/x/sys v0.6.0 golang.org/x/time v0.3.0 golang.org/x/tools v0.7.0 - google.golang.org/api v0.111.0 + google.golang.org/api v0.114.0 google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 google.golang.org/grpc v1.53.0 - google.golang.org/protobuf v1.29.0 + google.golang.org/protobuf v1.29.1 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.26.2 @@ -134,7 +134,7 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.3.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.7.0 // indirect + github.com/googleapis/gax-go/v2 v2.7.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect github.com/hashicorp/cronexpr v1.1.1 // indirect diff --git a/go.sum b/go.sum index 241499158..a3c3008df 100644 --- a/go.sum +++ b/go.sum @@ -378,8 +378,8 @@ 
github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9 github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= -github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/gophercloud/gophercloud v1.2.0 h1:1oXyj4g54KBg/kFtCdMM6jtxSzeIyg8wv4z1HoGPp1E= github.com/gophercloud/gophercloud v1.2.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -1103,8 +1103,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.111.0 h1:bwKi+z2BsdwYFRKrqwutM+axAlYLz83gt5pDSXCJT+0= -google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1183,8 +1183,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.29.0 h1:44S3JjaKmLEE4YIkjzexaP+NzZsudE3Zin5Njn/pYX0= -google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From ce353727f116f5432192267621fbd1a39c40146b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 2 Apr 2023 00:02:05 +0000 Subject: [PATCH 040/632] build(deps): bump github.com/prometheus/prometheus Bumps [github.com/prometheus/prometheus](https://github.com/prometheus/prometheus) from 0.42.0 to 0.43.0. - [Release notes](https://github.com/prometheus/prometheus/releases) - [Changelog](https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/prometheus/compare/v0.42.0...v0.43.0) --- updated-dependencies: - dependency-name: github.com/prometheus/prometheus dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 32 ++-- documentation/examples/remote_storage/go.sum | 148 +++++++++---------- 2 files changed, 89 insertions(+), 91 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 8eb97aaf5..5e0e6ddb1 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -10,22 +10,22 @@ require ( github.com/influxdata/influxdb v1.11.0 github.com/prometheus/client_golang v1.14.0 github.com/prometheus/common v0.42.0 - github.com/prometheus/prometheus v0.42.0 + github.com/prometheus/prometheus v0.43.0 github.com/stretchr/testify v1.8.2 ) require ( github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect - github.com/aws/aws-sdk-go v1.44.187 // indirect + github.com/aws/aws-sdk-go v1.44.217 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/go-logfmt/logfmt v0.5.1 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect @@ -36,22 +36,22 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect + github.com/prometheus/procfs v0.9.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp 
v0.37.0 // indirect - go.opentelemetry.io/otel v1.11.2 // indirect - go.opentelemetry.io/otel/metric v0.34.0 // indirect - go.opentelemetry.io/otel/trace v1.11.2 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 // indirect + go.opentelemetry.io/otel v1.14.0 // indirect + go.opentelemetry.io/otel/metric v0.37.0 // indirect + go.opentelemetry.io/otel/trace v1.14.0 // indirect go.uber.org/atomic v1.10.0 // indirect - go.uber.org/goleak v1.2.0 // indirect - golang.org/x/exp v0.0.0-20230124195608-d38c7dcee874 // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/oauth2 v0.5.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect + go.uber.org/goleak v1.2.1 // indirect + golang.org/x/exp v0.0.0-20230307190834-24139beb5833 // indirect + golang.org/x/net v0.8.0 // indirect + golang.org/x/oauth2 v0.6.0 // indirect + golang.org/x/sys v0.6.0 // indirect + golang.org/x/text v0.8.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/protobuf v1.29.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index e7ba9f53b..11c294515 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -7,7 +7,7 @@ github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+X github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= +github.com/Microsoft/go-winio v0.6.0 
h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= github.com/alecthomas/kingpin/v2 v2.3.2 h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU= github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -17,10 +17,10 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.187 h1:D5CsRomPnlwDHJCanL2mtaLIcbhjiWxNh5j8zvaWdJA= -github.com/aws/aws-sdk-go v1.44.187/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.217 h1:FcWC56MRl+k756aH3qeMQTylSdeJ58WN0iFz3fkyRz0= +github.com/aws/aws-sdk-go v1.44.217/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -28,23 +28,23 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 
v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk= +github.com/cncf/xds/go v0.0.0-20230112175826-46e39c7b9b43 h1:XP+uhjN0yBCN/tPkr8Z0BNDc5rZam9RG6UWyf2FrSQ0= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= -github.com/digitalocean/godo v1.95.0 h1:S48/byPKui7RHZc1wYEPfRvkcEvToADNb5I3guu95xg= +github.com/digitalocean/godo v1.97.0 h1:p9w1yCcWMZcxFSLPToNGXA96WfUVLXqoHti6GzVomL4= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/docker v20.10.23+incompatible h1:1ZQUUYAdh+oylOT85aA2ZcfRp22jmLhoaEcVEfK8dyA= +github.com/docker/docker v23.0.1+incompatible h1:vjgvJZxprTTE1A37nm+CLNAdwu6xZekyoiVlUZEINcY= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= +github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= +github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= -github.com/fatih/color 
v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= @@ -56,24 +56,24 @@ github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBj github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= 
-github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY= +github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= -github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -86,11 +86,11 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= +github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -100,27 +100,27 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/gophercloud/gophercloud v1.1.1 h1:MuGyqbSxiuVBqkPZ3+Nhbytk1xZxhmfCB2Rg1cJWFWM= +github.com/gophercloud/gophercloud v1.2.0 h1:1oXyj4g54KBg/kFtCdMM6jtxSzeIyg8wv4z1HoGPp1E= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= -github.com/hashicorp/consul/api v1.18.0 h1:R7PPNzTCeN6VuQNDwwhZWJvzCtGSrNpJqfb22h3yH9g= +github.com/hashicorp/consul/api v1.20.0 h1:9IHTjNVSZ7MIwjlW3N3a7iGiykCMDpxZu8jsxFJh0yc= github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= +github.com/hashicorp/go-hclog v1.4.0 
h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= +github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= -github.com/hashicorp/nomad/api v0.0.0-20230124213148-69fd1a0e4bf7 h1:XOdd3JHyeQnBRxotBo9ibxBFiYGuYhQU25s/YeV2cTU= +github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b h1:EkuSTU8c/63q4LMayj8ilgg/4I5PXDFVcnqKfs9qcwI= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= -github.com/hetznercloud/hcloud-go v1.39.0 h1:RUlzI458nGnPR6dlcZlrsGXYC1hQlFbKdm8tVtEQQB0= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/hetznercloud/hcloud-go v1.41.0 h1:KJGFRRc68QiVu4PrEP5BmCQVveCP2CM26UGQUKGpIUs= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/influxdata/influxdb v1.11.0 h1:0X+ZsbcOWc6AEi5MHee9BYqXCKmz8IZsljrRYjmV8Qg= github.com/influxdata/influxdb v1.11.0/go.mod h1:V93tJcidY0Zh0LtSONZWnXXGDyt20dtVf+Ddp4EnhaA= -github.com/ionos-cloud/sdk-go/v6 v6.1.3 h1:vb6yqdpiqaytvreM0bsn2pXw+1YDvEk2RKSmBAQvgDQ= +github.com/ionos-cloud/sdk-go/v6 v6.1.4 h1:BJHhFA8Q1SZC7VOXqKKr2BV2ysQ2/4hlk1e4hZte7GY= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -147,14 +147,14 @@ github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/linode/linodego v1.12.0 h1:33mOIrZ+gVva14gyJMKPZ85mQGovAvZCEP1ftgmFBjA= +github.com/linode/linodego v1.14.1 h1:uGxQyy0BidoEpLGdvfi4cPgEW+0YUFsEGrLEhcTfjNc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= +github.com/miekg/dns v1.1.51 h1:0+Xg7vObnhrz/4ZCZcZh7zPXlmU0aveS2HDBd0m0qSo= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -200,17 +200,16 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod 
h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/prometheus v0.42.0 h1:G769v8covTkOiNckXFIwLx01XE04OE6Fr0JPA0oR2nI= -github.com/prometheus/prometheus v0.42.0/go.mod h1:Pfqb/MLnnR2KK+0vchiaH39jXxvLMBk+3lnIGP4N7Vk= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/prometheus v0.43.0 h1:18iCSfrbAHbXvYFvR38U1Pt4uZmU9SmDcCpCrBKUiGg= +github.com/prometheus/prometheus v0.43.0/go.mod h1:2BA14LgBeqlPuzObSEbh+Y+JwLH2GcqDlJKbF2sA6FM= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.12 h1:Aaz4T7dZp7cB2cv7D/tGtRdSMh48sRaDYr7Jh0HV4qQ= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 h1:yFl3jyaSVLNYXlnNYM5z2pagEk1dYQhfr1p20T1NyKY= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -230,31 +229,30 @@ github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtX github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 h1:yt2NKzK7Vyo6h0+X8BA4FpreZQTlVEIarnsBP/H5mzs= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0/go.mod h1:+ARmXlUlc51J7sZeCBkBJNdHGySrdOzgzxp6VWRWM1U= -go.opentelemetry.io/otel v1.11.2 h1:YBZcQlsVekzFsFbjygXMOXSs6pialIZxcjfO/mBDmR0= -go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI= -go.opentelemetry.io/otel/metric v0.34.0 h1:MCPoQxcg/26EuuJwpYN1mZTeCYAUGx8ABxfW07YkjP8= -go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8= -go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0= -go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 h1:lE9EJyw3/JhrjWH/hEy9FptnalDQgj7vpbgC2KCCCxE= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0/go.mod h1:pcQ3MM3SWvrA71U4GDqv9UFDJ3HQsW7y5ZO3tDTlUdI= +go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= +go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= +go.opentelemetry.io/otel/metric v0.37.0 h1:pHDQuLQOZwYD+Km0eb657A25NaRzy0a+eLyKfDXedEs= +go.opentelemetry.io/otel/metric v0.37.0/go.mod h1:DmdaHfGt54iV6UKxsV9slj2bBRJcKC1B1uvDLIioc1s= +go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= +go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.2.0 
h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= -golang.org/x/exp v0.0.0-20230124195608-d38c7dcee874 h1:kWC3b7j6Fu09SnEBr7P4PuQyM0R6sqyH9R+EjIvT1nQ= -golang.org/x/exp v0.0.0-20230124195608-d38c7dcee874/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= +golang.org/x/exp v0.0.0-20230307190834-24139beb5833 h1:SChBja7BCQewoTAU7IgvucQKMIXrEpFxNMs0spT3/5s= +golang.org/x/exp v0.0.0-20230307190834-24139beb5833/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= +golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -271,12 +269,12 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s= -golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -303,20 +301,20 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= +golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -324,7 +322,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -333,8 +331,8 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2 h1:O97sLx/Xmb/KIZHB/2/BzofxBs5QmmR0LcihPtllmbc= -google.golang.org/grpc v1.52.1 h1:2NpOPk5g5Xtb0qebIEs7hNIa++PdtZLo2AQUpc1YnSU= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod 
h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -343,8 +341,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.0 h1:44S3JjaKmLEE4YIkjzexaP+NzZsudE3Zin5Njn/pYX0= +google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -352,7 +350,7 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -364,13 +362,13 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ= -k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ= -k8s.io/client-go v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU= +k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ= +k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ= +k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715 h1:tBEbstoM+K0FiBV5KGAKQ0kuvf54v/hwpldiJt69w1s= -k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y= +k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d h1:VcFq5n7wCJB2FQMCIHfC+f+jNcGgNMar1uKd6rVlifU= +k8s.io/utils v0.0.0-20230308161112-d77c459e9343 h1:m7tbIjXGcGIAtpmQr7/NAi7RsWoW3E7Zcm4jI1HicTc= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= From 408f31f78669a27e92aac41452a4f67dfb44f917 Mon Sep 17 00:00:00 2001 From: Hayk Davtyan <46712946+hayk96@users.noreply.github.com> Date: Sun, 2 Apr 2023 13:37:58 +0400 Subject: [PATCH 041/632] [WebUI/ScrapePoolList] Case-insensitive search of "Scrape Pools" (#12207) Signed-off-by: hayk96 --- .../react-app/src/pages/targets/ScrapePoolList.tsx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/web/ui/react-app/src/pages/targets/ScrapePoolList.tsx 
b/web/ui/react-app/src/pages/targets/ScrapePoolList.tsx index c9e2947e6..29585a200 100644 --- a/web/ui/react-app/src/pages/targets/ScrapePoolList.tsx +++ b/web/ui/react-app/src/pages/targets/ScrapePoolList.tsx @@ -31,6 +31,8 @@ const ScrapePoolDropDown: FC = ({ selectedPool, scrapeP const [filter, setFilter] = useState(''); + const filteredPools = scrapePools.filter((pool) => pool.toLowerCase().includes(filter.toLowerCase())); + return ( @@ -51,13 +53,11 @@ const ScrapePoolDropDown: FC = ({ selectedPool, scrapeP {scrapePools.length === 0 ? ( No scrape pools configured ) : ( - scrapePools - .filter((name) => filter === '' || name.includes(filter)) - .map((name) => ( - onScrapePoolChange(name)} active={name === selectedPool}> - {name} - - )) + filteredPools.map((name) => ( + onScrapePoolChange(name)} active={name === selectedPool}> + {name} + + )) )} From 889ef998f4f73d0d1fbd961e59494584e3616e42 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sun, 2 Apr 2023 11:00:14 +0100 Subject: [PATCH 042/632] remote-write: adjust MaxShards and Capacity for larger MaxSamplesPerSend Keep the target throughput and total pending samples the same. Signed-off-by: Bryan Boreham --- config/config.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/config/config.go b/config/config.go index 9b0f4ad1e..5c51d5a0d 100644 --- a/config/config.go +++ b/config/config.go @@ -173,16 +173,16 @@ var ( // DefaultQueueConfig is the default remote queue configuration. DefaultQueueConfig = QueueConfig{ - // With a maximum of 200 shards, assuming an average of 100ms remote write - // time and 2000 samples per batch, we will be able to push 4M samples/s. - MaxShards: 200, + // With a maximum of 50 shards, assuming an average of 100ms remote write + // time and 2000 samples per batch, we will be able to push 1M samples/s. 
+ MaxShards: 50, MinShards: 1, MaxSamplesPerSend: 2000, - // Each shard will have a max of 2500 samples pending in its channel, plus the pending - // samples that have been enqueued. Theoretically we should only ever have about 3000 samples - // per shard pending. At 200 shards that's 600k. - Capacity: 2500, + // Each shard will have a max of 10,000 samples pending in its channel, plus the pending + // samples that have been enqueued. Theoretically we should only ever have about 12,000 samples + // per shard pending. At 50 shards that's 600k. + Capacity: 10000, BatchSendDeadline: model.Duration(5 * time.Second), // Backoff times for retrying a batch of samples on recoverable errors. From e91720276654d0180f5dfa655f4561cb5c87e986 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sun, 2 Apr 2023 11:17:05 +0100 Subject: [PATCH 043/632] labels: make sure estimated size is not negative Deleted labels are remembered, even if they were not in `base` or were removed from `add`, so `base+add-del` could go negative. Signed-off-by: Bryan Boreham --- model/labels/labels.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/model/labels/labels.go b/model/labels/labels.go index 4e87eb9b3..e9d6c940e 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -577,7 +577,11 @@ func (b *Builder) Labels() Labels { return b.base } - res := make(Labels, 0, len(b.base)-len(b.del)+len(b.add)) + expectedSize := len(b.base) + len(b.add) - len(b.del) + if expectedSize < 1 { + expectedSize = 1 + } + res := make(Labels, 0, expectedSize) for _, l := range b.base { if slices.Contains(b.del, l.Name) || contains(b.add, l.Name) { continue From 8b011893276e357181c65c4c6dd095228e92a49b Mon Sep 17 00:00:00 2001 From: SuperQ Date: Mon, 3 Apr 2023 09:05:10 +0200 Subject: [PATCH 044/632] Move errcheck excludes config Eliminate the need for a second config file for golangci-lint config file by moving the list of errcheck exclude functions into the yaml config. 
Signed-off-by: SuperQ --- .github/workflows/ci.yml | 1 + .golangci.yml | 13 ++++++++++++- scripts/errcheck_excludes.txt | 13 ------------- 3 files changed, 13 insertions(+), 14 deletions(-) delete mode 100644 scripts/errcheck_excludes.txt diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ceb374c8c..1e625b63b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,6 +3,7 @@ name: CI on: pull_request: push: + jobs: test_go: name: Go tests diff --git a/.golangci.yml b/.golangci.yml index 81790e6e3..efa6b2044 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -33,7 +33,18 @@ linters-settings: - io/ioutil: "Use corresponding 'os' or 'io' functions instead." - regexp: "Use github.com/grafana/regexp instead of regexp" errcheck: - exclude: scripts/errcheck_excludes.txt + exclude-functions: + # Don't flag lines such as "io.Copy(io.Discard, resp.Body)". + - io.Copy + # The next two are used in HTTP handlers, any error is handled by the server itself. + - io.WriteString + - (net/http.ResponseWriter).Write + # No need to check for errors on server's shutdown. + - (*net/http.Server).Shutdown + # Never check for logger errors. + - (github.com/go-kit/log.Logger).Log + # Never check for rollback errors as Rollback() is called when a previous error was detected. + - (github.com/prometheus/prometheus/storage.Appender).Rollback goimports: local-prefixes: github.com/prometheus/prometheus gofumpt: diff --git a/scripts/errcheck_excludes.txt b/scripts/errcheck_excludes.txt deleted file mode 100644 index 8c77ca148..000000000 --- a/scripts/errcheck_excludes.txt +++ /dev/null @@ -1,13 +0,0 @@ -// Don't flag lines such as "io.Copy(io.Discard, resp.Body)". -io.Copy -// The next two are used in HTTP handlers, any error is handled by the server itself. -io.WriteString -(net/http.ResponseWriter).Write -// No need to check for errors on server's shutdown. -(*net/http.Server).Shutdown - -// Never check for logger errors. 
-(github.com/go-kit/log.Logger).Log - -// Never check for rollback errors as Rollback() is called when a previous error was detected. -(github.com/prometheus/prometheus/storage.Appender).Rollback From f2b9a39a48350c04a64541c934eb2a6e475fc9b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Mierzwa?= Date: Fri, 31 Mar 2023 14:31:14 +0100 Subject: [PATCH 045/632] Use a random port in cmd/prometheus tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are a few tests that will run prometheus command. This can test if there's already something listening on port :9090 since --web.listen-address defaults to 0.0.0.0:9090. To fix that we can tell prometheus to use a random port on loopback interface. Signed-off-by: Łukasz Mierzwa --- cmd/prometheus/main_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index 26d11e21e..21447d036 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -121,7 +121,7 @@ func TestFailedStartupExitCode(t *testing.T) { fakeInputFile := "fake-input-file" expectedExitStatus := 2 - prom := exec.Command(promPath, "-test.main", "--config.file="+fakeInputFile) + prom := exec.Command(promPath, "-test.main", "--web.listen-address=0.0.0.0:0", "--config.file="+fakeInputFile) err := prom.Run() require.Error(t, err) @@ -358,7 +358,7 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames } func TestAgentSuccessfulStartup(t *testing.T) { - prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+agentConfig) + prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig) require.NoError(t, prom.Start()) actualExitStatus := 0 @@ -376,7 +376,7 @@ func TestAgentSuccessfulStartup(t *testing.T) { } func TestAgentFailedStartupWithServerFlag(t *testing.T) { - prom := 
exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--config.file="+promConfig) + prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) output := bytes.Buffer{} prom.Stderr = &output @@ -403,7 +403,7 @@ func TestAgentFailedStartupWithServerFlag(t *testing.T) { } func TestAgentFailedStartupWithInvalidConfig(t *testing.T) { - prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+promConfig) + prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) require.NoError(t, prom.Start()) actualExitStatus := 0 @@ -438,7 +438,7 @@ func TestModeSpecificFlags(t *testing.T) { for _, tc := range testcases { t.Run(fmt.Sprintf("%s mode with option %s", tc.mode, tc.arg), func(t *testing.T) { - args := []string{"-test.main", tc.arg, t.TempDir()} + args := []string{"-test.main", tc.arg, t.TempDir(), "--web.listen-address=0.0.0.0:0"} if tc.mode == "agent" { args = append(args, "--enable-feature=agent", "--config.file="+agentConfig) From 291ab4d0bc6ea9f0e61073f303bd3982e9727b8d Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Mon, 3 Apr 2023 19:41:31 +0530 Subject: [PATCH 046/632] =?UTF-8?q?Add=20Jes=C3=BAs=20V=C3=A1zquez=20as=20?= =?UTF-8?q?a=20TSDB=20maintainer=20(#12222)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Ganesh Vernekar --- MAINTAINERS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 103ebdda6..1175bb9a6 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -10,7 +10,7 @@ Julien Pivotto ( / @roidelapluie) and Levi Harrison * `prometheus-mixin`: Björn Rabenstein ( / @beorn7) * `storage` * `remote`: Chris Marchbanks ( / @csmarchbanks), Callum Styan ( / @cstyan), Bartłomiej Płotka ( / @bwplotka), Tom Wilkie 
( / @tomwilkie) -* `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka) +* `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka), Jesús Vázquez ( / @jesusvazquez) * `agent`: Robert Fratto ( / @rfratto) * `web` * `ui`: Julius Volz ( / @juliusv) From c8b408cf5e60cc57fbcc087ca86ade99552097f5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Apr 2023 20:16:09 +0000 Subject: [PATCH 047/632] build(deps): bump github.com/Azure/go-autorest/autorest/adal Bumps [github.com/Azure/go-autorest/autorest/adal](https://github.com/Azure/go-autorest) from 0.9.22 to 0.9.23. - [Release notes](https://github.com/Azure/go-autorest/releases) - [Changelog](https://github.com/Azure/go-autorest/blob/main/CHANGELOG.md) - [Commits](https://github.com/Azure/go-autorest/compare/autorest/adal/v0.9.22...autorest/adal/v0.9.23) --- updated-dependencies: - dependency-name: github.com/Azure/go-autorest/autorest/adal dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ef3e73815..92cc3931c 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.19 require ( github.com/Azure/azure-sdk-for-go v65.0.0+incompatible github.com/Azure/go-autorest/autorest v0.11.28 - github.com/Azure/go-autorest/autorest/adal v0.9.22 + github.com/Azure/go-autorest/autorest/adal v0.9.23 github.com/alecthomas/kingpin/v2 v2.3.2 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 github.com/aws/aws-sdk-go v1.44.217 diff --git a/go.sum b/go.sum index e9c973ba4..7ea1be034 100644 --- a/go.sum +++ b/go.sum @@ -45,8 +45,8 @@ github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSW github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc= -github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= +github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= +github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= @@ -826,6 +826,7 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod 
h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -909,6 +910,7 @@ golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1000,11 +1002,13 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1016,6 +1020,7 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 3f7beeecc69ffe83ebaa4def073f644e178bd131 Mon Sep 17 00:00:00 2001 From: Nidhey Nitin Indurkar <46122307+nidhey27@users.noreply.github.com> Date: Tue, 4 Apr 2023 02:02:39 +0530 Subject: [PATCH 048/632] feat: health and readiness check of prometheus server in CLI (promtool) (#12096) * feat: health and readiness check of prometheus server in CLI (promtool) Signed-off-by: nidhey27 --- cmd/promtool/main.go | 60 +++++++++++++++++++++++++++++++++++ docs/command-line/promtool.md | 31 ++++++++++++++++++ 2 files changed, 91 insertions(+) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 3988957ef..064e7a04f 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -71,6 
+71,8 @@ const ( lintOptionAll = "all" lintOptionDuplicateRules = "duplicate-rules" lintOptionNone = "none" + checkHealth = "/-/healthy" + checkReadiness = "/-/ready" ) var lintOptions = []string{lintOptionAll, lintOptionDuplicateRules, lintOptionNone} @@ -87,6 +89,7 @@ func main() { app.HelpFlag.Short('h') checkCmd := app.Command("check", "Check the resources for validity.") + checkCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath) sdCheckCmd := checkCmd.Command("service-discovery", "Perform service discovery for the given job name and report the results, including relabeling.") sdConfigFile := sdCheckCmd.Arg("config-file", "The prometheus config file.").Required().ExistingFile() @@ -113,6 +116,18 @@ func main() { "The config files to check.", ).Required().ExistingFiles() + checkServerHealthCmd := checkCmd.Command("healthy", "Check if the Prometheus server is healthy.") + serverHealthURLArg := checkServerHealthCmd.Arg( + "server", + "The URL of the Prometheus server to check (e.g. http://localhost:9090)", + ).URL() + + checkServerReadyCmd := checkCmd.Command("ready", "Check if the Prometheus server is ready.") + serverReadyURLArg := checkServerReadyCmd.Arg( + "server", + "The URL of the Prometheus server to check (e.g. 
http://localhost:9090)", + ).URL() + checkRulesCmd := checkCmd.Command("rules", "Check if the rule files are valid or not.") ruleFiles := checkRulesCmd.Arg( "rule-files", @@ -276,6 +291,12 @@ func main() { case checkConfigCmd.FullCommand(): os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...)) + case checkServerHealthCmd.FullCommand(): + os.Exit(checkErr(CheckServerStatus(*serverHealthURLArg, checkHealth, httpRoundTripper))) + + case checkServerReadyCmd.FullCommand(): + os.Exit(checkErr(CheckServerStatus(*serverReadyURLArg, checkReadiness, httpRoundTripper))) + case checkWebConfigCmd.FullCommand(): os.Exit(CheckWebConfig(*webConfigFiles...)) @@ -369,6 +390,45 @@ func (ls lintConfig) lintDuplicateRules() bool { return ls.all || ls.duplicateRules } +const promDefaultURL = "http://localhost:9090" + +// Check server status - healthy & ready. +func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper http.RoundTripper) error { + if serverURL == nil { + serverURL, _ = url.Parse(promDefaultURL) + } + + config := api.Config{ + Address: serverURL.String() + checkEndpoint, + RoundTripper: roundTripper, + } + + // Create new client. + c, err := api.NewClient(config) + if err != nil { + fmt.Fprintln(os.Stderr, "error creating API client:", err) + return err + } + + request, err := http.NewRequest("GET", config.Address, nil) + if err != nil { + return err + } + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + response, dataBytes, err := c.Do(ctx, request) + if err != nil { + return err + } + + if response.StatusCode != http.StatusOK { + return fmt.Errorf("check failed: URL=%s, status=%d", serverURL, response.StatusCode) + } + + fmt.Fprintln(os.Stderr, " SUCCESS: ", string(dataBytes)) + return nil +} + // CheckConfig validates configuration files. 
func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files ...string) int { failed := false diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 42d853b85..543034f15 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -58,6 +58,7 @@ Check the resources for validity. | Flag | Description | | --- | --- | +| --http.config.file | HTTP client configuration file for promtool to connect to Prometheus. | | --extended | Print extended information related to the cardinality of the metrics. | @@ -130,6 +131,36 @@ Check if the web config files are valid or not. +##### `promtool check healthy` + +Check if the Prometheus server is healthy. + + + +###### Arguments + +| Argument | Description | +| --- | --- | +| server | The URL of the Prometheus server to check (e.g. http://localhost:9090) | + + + + +##### `promtool check ready` + +Check if the Prometheus server is ready. + + + +###### Arguments + +| Argument | Description | +| --- | --- | +| server | The URL of the Prometheus server to check (e.g. http://localhost:9090) | + + + + ##### `promtool check rules` Check if the rule files are valid or not. From 3923e83413075a17c5a10739696a879765a05542 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Apr 2023 23:25:07 +0200 Subject: [PATCH 049/632] build(deps): bump bufbuild/buf-setup-action from 1.13.1 to 1.16.0 (#12209) Bumps [bufbuild/buf-setup-action](https://github.com/bufbuild/buf-setup-action) from 1.13.1 to 1.16.0. - [Release notes](https://github.com/bufbuild/buf-setup-action/releases) - [Commits](https://github.com/bufbuild/buf-setup-action/compare/v1.13.1...v1.16.0) --- updated-dependencies: - dependency-name: bufbuild/buf-setup-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/buf-lint.yml | 2 +- .github/workflows/buf.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index 5d93c7dfe..3275d08fc 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.13.1 + - uses: bufbuild/buf-setup-action@v1.16.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@v1 diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index 2bfee7138..c80183b18 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -9,7 +9,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.13.1 + - uses: bufbuild/buf-setup-action@v1.16.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@v1 From 1936868e9d13e2aeda277f8d0782fc4739612ba8 Mon Sep 17 00:00:00 2001 From: Alex Le Date: Mon, 3 Apr 2023 23:31:49 -0700 Subject: [PATCH 050/632] Allow populate block logic in compact to be overriden outside Prometheus (#11711) Signed-off-by: Alex Le Signed-off-by: Alex Le --- tsdb/compact.go | 104 ++++++++++++++++++++++++------------------- tsdb/compact_test.go | 19 ++++---- tsdb/db_test.go | 10 ++--- tsdb/querier.go | 6 +-- 4 files changed, 75 insertions(+), 64 deletions(-) diff --git a/tsdb/compact.go b/tsdb/compact.go index 26a7c78c8..b2d412375 100644 --- a/tsdb/compact.go +++ b/tsdb/compact.go @@ -75,7 +75,7 @@ type Compactor interface { // LeveledCompactor implements the Compactor interface. 
type LeveledCompactor struct { - metrics *compactorMetrics + metrics *CompactorMetrics logger log.Logger ranges []int64 chunkPool chunkenc.Pool @@ -84,47 +84,47 @@ type LeveledCompactor struct { mergeFunc storage.VerticalChunkSeriesMergeFunc } -type compactorMetrics struct { - ran prometheus.Counter - populatingBlocks prometheus.Gauge - overlappingBlocks prometheus.Counter - duration prometheus.Histogram - chunkSize prometheus.Histogram - chunkSamples prometheus.Histogram - chunkRange prometheus.Histogram +type CompactorMetrics struct { + Ran prometheus.Counter + PopulatingBlocks prometheus.Gauge + OverlappingBlocks prometheus.Counter + Duration prometheus.Histogram + ChunkSize prometheus.Histogram + ChunkSamples prometheus.Histogram + ChunkRange prometheus.Histogram } -func newCompactorMetrics(r prometheus.Registerer) *compactorMetrics { - m := &compactorMetrics{} +func newCompactorMetrics(r prometheus.Registerer) *CompactorMetrics { + m := &CompactorMetrics{} - m.ran = prometheus.NewCounter(prometheus.CounterOpts{ + m.Ran = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_compactions_total", Help: "Total number of compactions that were executed for the partition.", }) - m.populatingBlocks = prometheus.NewGauge(prometheus.GaugeOpts{ + m.PopulatingBlocks = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "prometheus_tsdb_compaction_populating_block", Help: "Set to 1 when a block is currently being written to the disk.", }) - m.overlappingBlocks = prometheus.NewCounter(prometheus.CounterOpts{ + m.OverlappingBlocks = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_vertical_compactions_total", Help: "Total number of compactions done on overlapping blocks.", }) - m.duration = prometheus.NewHistogram(prometheus.HistogramOpts{ + m.Duration = prometheus.NewHistogram(prometheus.HistogramOpts{ Name: "prometheus_tsdb_compaction_duration_seconds", Help: "Duration of compaction runs", Buckets: prometheus.ExponentialBuckets(1, 2, 14), }) 
- m.chunkSize = prometheus.NewHistogram(prometheus.HistogramOpts{ + m.ChunkSize = prometheus.NewHistogram(prometheus.HistogramOpts{ Name: "prometheus_tsdb_compaction_chunk_size_bytes", Help: "Final size of chunks on their first compaction", Buckets: prometheus.ExponentialBuckets(32, 1.5, 12), }) - m.chunkSamples = prometheus.NewHistogram(prometheus.HistogramOpts{ + m.ChunkSamples = prometheus.NewHistogram(prometheus.HistogramOpts{ Name: "prometheus_tsdb_compaction_chunk_samples", Help: "Final number of samples on their first compaction", Buckets: prometheus.ExponentialBuckets(4, 1.5, 12), }) - m.chunkRange = prometheus.NewHistogram(prometheus.HistogramOpts{ + m.ChunkRange = prometheus.NewHistogram(prometheus.HistogramOpts{ Name: "prometheus_tsdb_compaction_chunk_range_seconds", Help: "Final time range of chunks on their first compaction", Buckets: prometheus.ExponentialBuckets(100, 4, 10), @@ -132,13 +132,13 @@ func newCompactorMetrics(r prometheus.Registerer) *compactorMetrics { if r != nil { r.MustRegister( - m.ran, - m.populatingBlocks, - m.overlappingBlocks, - m.duration, - m.chunkRange, - m.chunkSamples, - m.chunkSize, + m.Ran, + m.PopulatingBlocks, + m.OverlappingBlocks, + m.Duration, + m.ChunkRange, + m.ChunkSamples, + m.ChunkSize, ) } return m @@ -392,6 +392,10 @@ func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta { // Compact creates a new block in the compactor's directory from the blocks in the // provided directories. 
func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (uid ulid.ULID, err error) { + return c.CompactWithPopulateBlockFunc(dest, dirs, open, DefaultPopulateBlockFunc{}) +} + +func (c *LeveledCompactor) CompactWithPopulateBlockFunc(dest string, dirs []string, open []*Block, populateBlockFunc PopulateBlockFunc) (uid ulid.ULID, err error) { var ( blocks []BlockReader bs []*Block @@ -435,7 +439,7 @@ func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (u uid = ulid.MustNew(ulid.Now(), rand.Reader) meta := CompactBlockMetas(uid, metas...) - err = c.write(dest, meta, blocks...) + err = c.write(dest, meta, populateBlockFunc, blocks...) if err == nil { if meta.Stats.NumSamples == 0 { for _, b := range bs { @@ -501,7 +505,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, p } } - err := c.write(dest, meta, b) + err := c.write(dest, meta, DefaultPopulateBlockFunc{}, b) if err != nil { return uid, err } @@ -546,7 +550,7 @@ func (w *instrumentedChunkWriter) WriteChunks(chunks ...chunks.Meta) error { } // write creates a new block that is the union of the provided blocks into dir. 
-func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockReader) (err error) { +func (c *LeveledCompactor) write(dest string, meta *BlockMeta, populateBlockFunc PopulateBlockFunc, blocks ...BlockReader) (err error) { dir := filepath.Join(dest, meta.ULID.String()) tmp := dir + tmpForCreationBlockDirSuffix var closers []io.Closer @@ -557,8 +561,8 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe if err := os.RemoveAll(tmp); err != nil { level.Error(c.logger).Log("msg", "removed tmp folder after failed compaction", "err", err.Error()) } - c.metrics.ran.Inc() - c.metrics.duration.Observe(time.Since(t).Seconds()) + c.metrics.Ran.Inc() + c.metrics.Duration.Observe(time.Since(t).Seconds()) }(time.Now()) if err = os.RemoveAll(tmp); err != nil { @@ -582,9 +586,9 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe if meta.Compaction.Level == 1 { chunkw = &instrumentedChunkWriter{ ChunkWriter: chunkw, - size: c.metrics.chunkSize, - samples: c.metrics.chunkSamples, - trange: c.metrics.chunkRange, + size: c.metrics.ChunkSize, + samples: c.metrics.ChunkSamples, + trange: c.metrics.ChunkRange, } } @@ -594,7 +598,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe } closers = append(closers, indexw) - if err := c.populateBlock(blocks, meta, indexw, chunkw); err != nil { + if err := populateBlockFunc.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, indexw, chunkw); err != nil { return errors.Wrap(err, "populate block") } @@ -659,10 +663,16 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe return nil } -// populateBlock fills the index and chunk writers with new data gathered as the union +type PopulateBlockFunc interface { + PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks 
[]BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) error +} + +type DefaultPopulateBlockFunc struct{} + +// PopulateBlock fills the index and chunk writers with new data gathered as the union // of the provided blocks. It returns meta information for the new block. // It expects sorted blocks input by mint. -func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) (err error) { +func (c DefaultPopulateBlockFunc) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) (err error) { if len(blocks) == 0 { return errors.New("cannot populate block from no readers") } @@ -679,23 +689,23 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, errs.Add(errors.Wrap(cerr, "close")) } err = errs.Err() - c.metrics.populatingBlocks.Set(0) + metrics.PopulatingBlocks.Set(0) }() - c.metrics.populatingBlocks.Set(1) + metrics.PopulatingBlocks.Set(1) globalMaxt := blocks[0].Meta().MaxTime for i, b := range blocks { select { - case <-c.ctx.Done(): - return c.ctx.Err() + case <-ctx.Done(): + return ctx.Err() default: } if !overlapping { if i > 0 && b.Meta().MinTime < globalMaxt { - c.metrics.overlappingBlocks.Inc() + metrics.OverlappingBlocks.Inc() overlapping = true - level.Info(c.logger).Log("msg", "Found overlapping blocks during compaction", "ulid", meta.ULID) + level.Info(logger).Log("msg", "Found overlapping blocks during compaction", "ulid", meta.ULID) } if b.Meta().MaxTime > globalMaxt { globalMaxt = b.Meta().MaxTime @@ -727,7 +737,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, } all = indexr.SortedPostings(all) // Blocks meta is half open: [min, max), so subtract 1 to ensure we don't hold samples with exact meta.MaxTime timestamp. 
- sets = append(sets, newBlockChunkSeriesSet(b.Meta().ULID, indexr, chunkr, tombsr, all, meta.MinTime, meta.MaxTime-1, false)) + sets = append(sets, NewBlockChunkSeriesSet(b.Meta().ULID, indexr, chunkr, tombsr, all, meta.MinTime, meta.MaxTime-1, false)) syms := indexr.Symbols() if i == 0 { symbols = syms @@ -755,14 +765,14 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, if len(sets) > 1 { // Merge series using specified chunk series merger. // The default one is the compacting series merger. - set = storage.NewMergeChunkSeriesSet(sets, c.mergeFunc) + set = storage.NewMergeChunkSeriesSet(sets, mergeFunc) } // Iterate over all sorted chunk series. for set.Next() { select { - case <-c.ctx.Done(): - return c.ctx.Err() + case <-ctx.Done(): + return ctx.Err() default: } s := set.At() @@ -797,7 +807,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, } for _, chk := range chks { - if err := c.chunkPool.Put(chk.Chunk); err != nil { + if err := chunkPool.Put(chk.Chunk); err != nil { return errors.Wrap(err, "put chunk") } } diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index b58755e63..0a01892d5 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -441,7 +441,7 @@ func TestCompactionFailWillCleanUpTempDir(t *testing.T) { tmpdir := t.TempDir() - require.Error(t, compactor.write(tmpdir, &BlockMeta{}, erringBReader{})) + require.Error(t, compactor.write(tmpdir, &BlockMeta{}, DefaultPopulateBlockFunc{}, erringBReader{})) _, err = os.Stat(filepath.Join(tmpdir, BlockMeta{}.ULID.String()) + tmpForCreationBlockDirSuffix) require.True(t, os.IsNotExist(err), "directory is not cleaned up") } @@ -953,7 +953,8 @@ func TestCompaction_populateBlock(t *testing.T) { } iw := &mockIndexWriter{} - err = c.populateBlock(blocks, meta, iw, nopChunkWriter{}) + populateBlockFunc := DefaultPopulateBlockFunc{} + err = populateBlockFunc.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, 
meta, iw, nopChunkWriter{}) if tc.expErr != nil { require.Error(t, err) require.Equal(t, tc.expErr.Error(), err.Error()) @@ -1181,14 +1182,14 @@ func TestCancelCompactions(t *testing.T) { db, err := open(tmpdir, log.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000}, nil) require.NoError(t, err) require.Equal(t, 3, len(db.Blocks()), "initial block count mismatch") - require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "initial compaction counter mismatch") + require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran), "initial compaction counter mismatch") db.compactc <- struct{}{} // Trigger a compaction. - for prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.populatingBlocks) <= 0 { + for prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.PopulatingBlocks) <= 0 { time.Sleep(3 * time.Millisecond) } start := time.Now() - for prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran) != 1 { + for prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran) != 1 { time.Sleep(3 * time.Millisecond) } timeCompactionUninterrupted = time.Since(start) @@ -1200,10 +1201,10 @@ func TestCancelCompactions(t *testing.T) { db, err := open(tmpdirCopy, log.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000}, nil) require.NoError(t, err) require.Equal(t, 3, len(db.Blocks()), "initial block count mismatch") - require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "initial compaction counter mismatch") + require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran), "initial compaction counter mismatch") db.compactc <- struct{}{} // Trigger a compaction. 
- for prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.populatingBlocks) <= 0 { + for prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.PopulatingBlocks) <= 0 { time.Sleep(3 * time.Millisecond) } @@ -1284,7 +1285,7 @@ func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) { require.NoError(t, os.RemoveAll(lastBlockIndex)) // Corrupt the block by removing the index file. require.Equal(t, 0.0, prom_testutil.ToFloat64(db.metrics.reloadsFailed), "initial 'failed db reloadBlocks' count metrics mismatch") - require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "initial `compactions` count metric mismatch") + require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran), "initial `compactions` count metric mismatch") require.Equal(t, 0.0, prom_testutil.ToFloat64(db.metrics.compactionsFailed), "initial `compactions failed` count metric mismatch") // Do the compaction and check the metrics. @@ -1292,7 +1293,7 @@ func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) { // the new block created from the compaction should be deleted. 
require.Error(t, db.Compact()) require.Equal(t, 1.0, prom_testutil.ToFloat64(db.metrics.reloadsFailed), "'failed db reloadBlocks' count metrics mismatch") - require.Equal(t, 1.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "`compaction` count metric mismatch") + require.Equal(t, 1.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran), "`compaction` count metric mismatch") require.Equal(t, 1.0, prom_testutil.ToFloat64(db.metrics.compactionsFailed), "`compactions failed` count metric mismatch") actBlocks, err = blockDirs(db.Dir()) diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 33a47ea9e..edcfe5f9c 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -2055,7 +2055,7 @@ func TestNoEmptyBlocks(t *testing.T) { require.NoError(t, err) require.Equal(t, len(db.Blocks()), len(actBlocks)) require.Equal(t, 0, len(actBlocks)) - require.Equal(t, 0, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran)), "no compaction should be triggered here") + require.Equal(t, 0, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "no compaction should be triggered here") }) t.Run("Test no blocks after deleting all samples from head.", func(t *testing.T) { @@ -2069,7 +2069,7 @@ func TestNoEmptyBlocks(t *testing.T) { require.NoError(t, app.Commit()) require.NoError(t, db.Delete(math.MinInt64, math.MaxInt64, defaultMatcher)) require.NoError(t, db.Compact()) - require.Equal(t, 1, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran)), "compaction should have been triggered here") + require.Equal(t, 1, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "compaction should have been triggered here") actBlocks, err := blockDirs(db.Dir()) require.NoError(t, err) @@ -2091,7 +2091,7 @@ func TestNoEmptyBlocks(t *testing.T) { require.NoError(t, app.Commit()) require.NoError(t, db.Compact()) - require.Equal(t, 2, 
int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran)), "compaction should have been triggered here") + require.Equal(t, 2, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "compaction should have been triggered here") actBlocks, err = blockDirs(db.Dir()) require.NoError(t, err) require.Equal(t, len(db.Blocks()), len(actBlocks)) @@ -2112,7 +2112,7 @@ func TestNoEmptyBlocks(t *testing.T) { require.NoError(t, app.Commit()) require.NoError(t, db.head.Delete(math.MinInt64, math.MaxInt64, defaultMatcher)) require.NoError(t, db.Compact()) - require.Equal(t, 3, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran)), "compaction should have been triggered here") + require.Equal(t, 3, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "compaction should have been triggered here") require.Equal(t, oldBlocks, db.Blocks()) }) @@ -2131,7 +2131,7 @@ func TestNoEmptyBlocks(t *testing.T) { require.Equal(t, len(blocks)+len(oldBlocks), len(db.Blocks())) // Ensure all blocks are registered. 
require.NoError(t, db.Delete(math.MinInt64, math.MaxInt64, defaultMatcher)) require.NoError(t, db.Compact()) - require.Equal(t, 5, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran)), "compaction should have been triggered here once for each block that have tombstones") + require.Equal(t, 5, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "compaction should have been triggered here once for each block that have tombstones") actBlocks, err := blockDirs(db.Dir()) require.NoError(t, err) diff --git a/tsdb/querier.go b/tsdb/querier.go index 74c73f460..4b3144c71 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -180,7 +180,7 @@ func (q *blockChunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, if sortSeries { p = q.index.SortedPostings(p) } - return newBlockChunkSeriesSet(q.blockID, q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming) + return NewBlockChunkSeriesSet(q.blockID, q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming) } func findSetMatches(pattern string) []string { @@ -438,7 +438,7 @@ func (s *seriesData) Labels() labels.Labels { return s.labels } // blockBaseSeriesSet allows to iterate over all series in the single block. // Iterated series are trimmed with given min and max time as well as tombstones. -// See newBlockSeriesSet and newBlockChunkSeriesSet to use it for either sample or chunk iterating. +// See newBlockSeriesSet and NewBlockChunkSeriesSet to use it for either sample or chunk iterating. 
type blockBaseSeriesSet struct { blockID ulid.ULID p index.Postings @@ -924,7 +924,7 @@ type blockChunkSeriesSet struct { blockBaseSeriesSet } -func newBlockChunkSeriesSet(id ulid.ULID, i IndexReader, c ChunkReader, t tombstones.Reader, p index.Postings, mint, maxt int64, disableTrimming bool) storage.ChunkSeriesSet { +func NewBlockChunkSeriesSet(id ulid.ULID, i IndexReader, c ChunkReader, t tombstones.Reader, p index.Postings, mint, maxt int64, disableTrimming bool) storage.ChunkSeriesSet { return &blockChunkSeriesSet{ blockBaseSeriesSet{ blockID: id, From 6cecb8794164200c2010483b8f22676994bcbe16 Mon Sep 17 00:00:00 2001 From: Soon-Ping Date: Tue, 4 Apr 2023 11:21:13 -0700 Subject: [PATCH 051/632] Generalized rule group iteration evaluation hook (#11885) Signed-off-by: Soon-Ping Phang --- rules/manager.go | 149 ++++++++++++++++++++++++------------------ rules/manager_test.go | 58 +++++++++++----- 2 files changed, 126 insertions(+), 81 deletions(-) diff --git a/rules/manager.go b/rules/manager.go index f8dcf081f..108b6a77a 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -253,7 +253,8 @@ type Group struct { opts *ManagerOptions mtx sync.Mutex evaluationTime time.Duration - lastEvaluation time.Time + lastEvaluation time.Time // Wall-clock time of most recent evaluation. + lastEvalTimestamp time.Time // Time slot used for most recent evaluation. shouldRestore bool @@ -266,22 +267,27 @@ type Group struct { metrics *Metrics - ruleGroupPostProcessFunc RuleGroupPostProcessFunc + // Rule group evaluation iteration function, + // defaults to DefaultEvalIterationFunc. + evalIterationFunc GroupEvalIterationFunc } -// This function will be used before each rule group evaluation if not nil. -// Use this function type if the rule group post processing is needed. -type RuleGroupPostProcessFunc func(g *Group, lastEvalTimestamp time.Time, log log.Logger) error +// GroupEvalIterationFunc is used to implement and extend rule group +// evaluation iteration logic. 
It is configured in Group.evalIterationFunc, +// and periodically invoked at each group evaluation interval to +// evaluate the rules in the group at that point in time. +// DefaultEvalIterationFunc is the default implementation. +type GroupEvalIterationFunc func(ctx context.Context, g *Group, evalTimestamp time.Time) type GroupOptions struct { - Name, File string - Interval time.Duration - Limit int - Rules []Rule - ShouldRestore bool - Opts *ManagerOptions - done chan struct{} - RuleGroupPostProcessFunc RuleGroupPostProcessFunc + Name, File string + Interval time.Duration + Limit int + Rules []Rule + ShouldRestore bool + Opts *ManagerOptions + done chan struct{} + EvalIterationFunc GroupEvalIterationFunc } // NewGroup makes a new Group with the given name, options, and rules. @@ -302,21 +308,26 @@ func NewGroup(o GroupOptions) *Group { metrics.GroupSamples.WithLabelValues(key) metrics.GroupInterval.WithLabelValues(key).Set(o.Interval.Seconds()) + evalIterationFunc := o.EvalIterationFunc + if evalIterationFunc == nil { + evalIterationFunc = DefaultEvalIterationFunc + } + return &Group{ - name: o.Name, - file: o.File, - interval: o.Interval, - limit: o.Limit, - rules: o.Rules, - shouldRestore: o.ShouldRestore, - opts: o.Opts, - seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)), - done: make(chan struct{}), - managerDone: o.done, - terminated: make(chan struct{}), - logger: log.With(o.Opts.Logger, "file", o.File, "group", o.Name), - metrics: metrics, - ruleGroupPostProcessFunc: o.RuleGroupPostProcessFunc, + name: o.Name, + file: o.File, + interval: o.Interval, + limit: o.Limit, + rules: o.Rules, + shouldRestore: o.ShouldRestore, + opts: o.Opts, + seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)), + done: make(chan struct{}), + managerDone: o.done, + terminated: make(chan struct{}), + logger: log.With(o.Opts.Logger, "file", o.File, "group", o.Name), + metrics: metrics, + evalIterationFunc: evalIterationFunc, } } @@ -341,6 +352,8 
@@ func (g *Group) Interval() time.Duration { return g.interval } // Limit returns the group's limit. func (g *Group) Limit() int { return g.limit } +func (g *Group) Logger() log.Logger { return g.logger } + func (g *Group) run(ctx context.Context) { defer close(g.terminated) @@ -359,18 +372,6 @@ func (g *Group) run(ctx context.Context) { }, }) - iter := func() { - g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Inc() - - start := time.Now() - g.Eval(ctx, evalTimestamp) - timeSinceStart := time.Since(start) - - g.metrics.IterationDuration.Observe(timeSinceStart.Seconds()) - g.setEvaluationTime(timeSinceStart) - g.setLastEvaluation(start) - } - // The assumption here is that since the ticker was started after having // waited for `evalTimestamp` to pass, the ticks will trigger soon // after each `evalTimestamp + N * g.interval` occurrence. @@ -400,7 +401,7 @@ func (g *Group) run(ctx context.Context) { }(time.Now()) }() - iter() + g.evalIterationFunc(ctx, g, evalTimestamp) if g.shouldRestore { // If we have to restore, we wait for another Eval to finish. 
// The reason behind this is, during first eval (or before it) @@ -416,7 +417,7 @@ func (g *Group) run(ctx context.Context) { g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed)) } evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval) - iter() + g.evalIterationFunc(ctx, g, evalTimestamp) } g.RestoreForState(time.Now()) @@ -439,21 +440,29 @@ func (g *Group) run(ctx context.Context) { } evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval) - useRuleGroupPostProcessFunc(g, evalTimestamp.Add(-(missed+1)*g.interval)) - - iter() + g.evalIterationFunc(ctx, g, evalTimestamp) } } } } -func useRuleGroupPostProcessFunc(g *Group, lastEvalTimestamp time.Time) { - if g.ruleGroupPostProcessFunc != nil { - err := g.ruleGroupPostProcessFunc(g, lastEvalTimestamp, g.logger) - if err != nil { - level.Warn(g.logger).Log("msg", "ruleGroupPostProcessFunc failed", "err", err) - } - } +// DefaultEvalIterationFunc is the default implementation of +// GroupEvalIterationFunc that is periodically invoked to evaluate the rules +// in a group at a given point in time and updates Group state and metrics +// accordingly. Custom GroupEvalIterationFunc implementations are recommended +// to invoke this function as well, to ensure correct Group state and metrics +// are maintained. +func DefaultEvalIterationFunc(ctx context.Context, g *Group, evalTimestamp time.Time) { + g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Inc() + + start := time.Now() + g.Eval(ctx, evalTimestamp) + timeSinceStart := time.Since(start) + + g.metrics.IterationDuration.Observe(timeSinceStart.Seconds()) + g.setEvaluationTime(timeSinceStart) + g.setLastEvaluation(start) + g.setLastEvalTimestamp(evalTimestamp) } func (g *Group) stop() { @@ -533,6 +542,20 @@ func (g *Group) setLastEvaluation(ts time.Time) { g.lastEvaluation = ts } +// GetLastEvalTimestamp returns the timestamp of the last evaluation. 
+func (g *Group) GetLastEvalTimestamp() time.Time { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.lastEvalTimestamp +} + +// setLastEvalTimestamp updates lastEvalTimestamp to the timestamp of the last evaluation. +func (g *Group) setLastEvalTimestamp(ts time.Time) { + g.mtx.Lock() + defer g.mtx.Unlock() + g.lastEvalTimestamp = ts +} + // EvalTimestamp returns the immediately preceding consistently slotted evaluation time. func (g *Group) EvalTimestamp(startTime int64) time.Time { var ( @@ -996,11 +1019,11 @@ func (m *Manager) Stop() { // Update the rule manager's state as the config requires. If // loading the new rules failed the old rule set is restored. -func (m *Manager) Update(interval time.Duration, files []string, externalLabels labels.Labels, externalURL string, ruleGroupPostProcessFunc RuleGroupPostProcessFunc) error { +func (m *Manager) Update(interval time.Duration, files []string, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc) error { m.mtx.Lock() defer m.mtx.Unlock() - groups, errs := m.LoadGroups(interval, externalLabels, externalURL, ruleGroupPostProcessFunc, files...) + groups, errs := m.LoadGroups(interval, externalLabels, externalURL, groupEvalIterationFunc, files...) if errs != nil { for _, e := range errs { @@ -1085,7 +1108,7 @@ func (FileLoader) Parse(query string) (parser.Expr, error) { return parser.Parse // LoadGroups reads groups from a list of files. 
func (m *Manager) LoadGroups( - interval time.Duration, externalLabels labels.Labels, externalURL string, ruleGroupPostProcessFunc RuleGroupPostProcessFunc, filenames ...string, + interval time.Duration, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc, filenames ...string, ) (map[string]*Group, []error) { groups := make(map[string]*Group) @@ -1133,15 +1156,15 @@ func (m *Manager) LoadGroups( } groups[GroupKey(fn, rg.Name)] = NewGroup(GroupOptions{ - Name: rg.Name, - File: fn, - Interval: itv, - Limit: rg.Limit, - Rules: rules, - ShouldRestore: shouldRestore, - Opts: m.opts, - done: m.done, - RuleGroupPostProcessFunc: ruleGroupPostProcessFunc, + Name: rg.Name, + File: fn, + Interval: itv, + Limit: rg.Limit, + Rules: rules, + ShouldRestore: shouldRestore, + Opts: m.opts, + done: m.done, + EvalIterationFunc: groupEvalIterationFunc, }) } } diff --git a/rules/manager_test.go b/rules/manager_test.go index aed289c5b..1faf730be 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -1237,7 +1237,7 @@ func TestRuleHealthUpdates(t *testing.T) { require.Equal(t, HealthBad, rules.Health()) } -func TestUpdateMissedEvalMetrics(t *testing.T) { +func TestRuleGroupEvalIterationFunc(t *testing.T) { suite, err := promql.NewTest(t, ` load 5m http_requests{instance="0"} 75 85 50 0 0 25 0 0 40 0 120 @@ -1254,26 +1254,40 @@ func TestUpdateMissedEvalMetrics(t *testing.T) { testValue := 1 - overrideFunc := func(g *Group, lastEvalTimestamp time.Time, log log.Logger) error { + evalIterationFunc := func(ctx context.Context, g *Group, evalTimestamp time.Time) { testValue = 2 - return nil + DefaultEvalIterationFunc(ctx, g, evalTimestamp) + testValue = 3 + } + + skipEvalIterationFunc := func(ctx context.Context, g *Group, evalTimestamp time.Time) { + testValue = 4 } type testInput struct { - overrideFunc func(g *Group, lastEvalTimestamp time.Time, logger log.Logger) error - expectedValue int + evalIterationFunc GroupEvalIterationFunc + 
expectedValue int + lastEvalTimestampIsZero bool } tests := []testInput{ - // testValue should still have value of 1 since overrideFunc is nil. + // testValue should still have value of 1 since the default iteration function will be called. { - overrideFunc: nil, - expectedValue: 1, + evalIterationFunc: nil, + expectedValue: 1, + lastEvalTimestampIsZero: false, }, - // testValue should be incremented to 2 since overrideFunc is called. + // testValue should be incremented to 3 since evalIterationFunc is called. { - overrideFunc: overrideFunc, - expectedValue: 2, + evalIterationFunc: evalIterationFunc, + expectedValue: 3, + lastEvalTimestampIsZero: false, + }, + // testValue should be incremented to 4 since skipEvalIterationFunc is called. + { + evalIterationFunc: skipEvalIterationFunc, + expectedValue: 4, + lastEvalTimestampIsZero: true, }, } @@ -1315,12 +1329,12 @@ func TestUpdateMissedEvalMetrics(t *testing.T) { } group := NewGroup(GroupOptions{ - Name: "default", - Interval: time.Second, - Rules: []Rule{rule}, - ShouldRestore: true, - Opts: opts, - RuleGroupPostProcessFunc: tst.overrideFunc, + Name: "default", + Interval: time.Second, + Rules: []Rule{rule}, + ShouldRestore: true, + Opts: opts, + EvalIterationFunc: tst.evalIterationFunc, }) go func() { @@ -1329,10 +1343,18 @@ func TestUpdateMissedEvalMetrics(t *testing.T) { time.Sleep(3 * time.Second) group.stop() + require.Equal(t, tst.expectedValue, testValue) + if tst.lastEvalTimestampIsZero { + require.Zero(t, group.GetLastEvalTimestamp()) + } else { + oneMinute, _ := time.ParseDuration("1m") + require.WithinDuration(t, time.Now(), group.GetLastEvalTimestamp(), oneMinute) + } } - for _, tst := range tests { + for i, tst := range tests { + t.Logf("case %d", i) testFunc(tst) } } From 391473141d86f3a923a45421e5fc0f2ec4d541dc Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Wed, 5 Apr 2023 09:45:39 +0200 Subject: [PATCH 052/632] Check health & ready: move to flags (#12223) This makes it more consistent with 
other command like import rules. We don't have stricts rules and uniformity accross promtool unfortunately, but I think it's better to only have the http config on relevant check commands to avoid thinking Prometheus can e.g. check the config over the wire. Signed-off-by: Julien Pivotto --- cmd/promtool/main.go | 23 ++++++++--------------- docs/command-line/promtool.md | 19 ++++++++++--------- 2 files changed, 18 insertions(+), 24 deletions(-) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 064e7a04f..de002a0b2 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -89,7 +89,6 @@ func main() { app.HelpFlag.Short('h') checkCmd := app.Command("check", "Check the resources for validity.") - checkCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath) sdCheckCmd := checkCmd.Command("service-discovery", "Perform service discovery for the given job name and report the results, including relabeling.") sdConfigFile := sdCheckCmd.Arg("config-file", "The prometheus config file.").Required().ExistingFile() @@ -117,16 +116,12 @@ func main() { ).Required().ExistingFiles() checkServerHealthCmd := checkCmd.Command("healthy", "Check if the Prometheus server is healthy.") - serverHealthURLArg := checkServerHealthCmd.Arg( - "server", - "The URL of the Prometheus server to check (e.g. http://localhost:9090)", - ).URL() + checkServerHealthCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath) + checkServerHealthCmd.Flag("url", "The URL for the Prometheus server.").Default("http://localhost:9090").URLVar(&serverURL) checkServerReadyCmd := checkCmd.Command("ready", "Check if the Prometheus server is ready.") - serverReadyURLArg := checkServerReadyCmd.Arg( - "server", - "The URL of the Prometheus server to check (e.g. 
http://localhost:9090)", - ).URL() + checkServerReadyCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath) + checkServerReadyCmd.Flag("url", "The URL for the Prometheus server.").Default("http://localhost:9090").URLVar(&serverURL) checkRulesCmd := checkCmd.Command("rules", "Check if the rule files are valid or not.") ruleFiles := checkRulesCmd.Arg( @@ -292,10 +287,10 @@ func main() { os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...)) case checkServerHealthCmd.FullCommand(): - os.Exit(checkErr(CheckServerStatus(*serverHealthURLArg, checkHealth, httpRoundTripper))) + os.Exit(checkErr(CheckServerStatus(serverURL, checkHealth, httpRoundTripper))) case checkServerReadyCmd.FullCommand(): - os.Exit(checkErr(CheckServerStatus(*serverReadyURLArg, checkReadiness, httpRoundTripper))) + os.Exit(checkErr(CheckServerStatus(serverURL, checkReadiness, httpRoundTripper))) case checkWebConfigCmd.FullCommand(): os.Exit(CheckWebConfig(*webConfigFiles...)) @@ -390,12 +385,10 @@ func (ls lintConfig) lintDuplicateRules() bool { return ls.all || ls.duplicateRules } -const promDefaultURL = "http://localhost:9090" - // Check server status - healthy & ready. func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper http.RoundTripper) error { - if serverURL == nil { - serverURL, _ = url.Parse(promDefaultURL) + if serverURL.Scheme == "" { + serverURL.Scheme = "http" } config := api.Config{ diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 543034f15..e149d374a 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -58,7 +58,6 @@ Check the resources for validity. | Flag | Description | | --- | --- | -| --http.config.file | HTTP client configuration file for promtool to connect to Prometheus. 
| | --extended | Print extended information related to the cardinality of the metrics. | @@ -137,11 +136,12 @@ Check if the Prometheus server is healthy. -###### Arguments +###### Flags -| Argument | Description | -| --- | --- | -| server | The URL of the Prometheus server to check (e.g. http://localhost:9090) | +| Flag | Description | Default | +| --- | --- | --- | +| --http.config.file | HTTP client configuration file for promtool to connect to Prometheus. | | +| --url | The URL for the Prometheus server. | `http://localhost:9090` | @@ -152,11 +152,12 @@ Check if the Prometheus server is ready. -###### Arguments +###### Flags -| Argument | Description | -| --- | --- | -| server | The URL of the Prometheus server to check (e.g. http://localhost:9090) | +| Flag | Description | Default | +| --- | --- | --- | +| --http.config.file | HTTP client configuration file for promtool to connect to Prometheus. | | +| --url | The URL for the Prometheus server. | `http://localhost:9090` | From 034eb2b3f2b31589ce1ebd6402e1fb6465b0afd4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Tue, 4 Apr 2023 13:28:02 +0200 Subject: [PATCH 053/632] promtool: read from stdin if no filenames are provided in check rules MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- cmd/promtool/main.go | 21 +++++++++++++++++---- docs/command-line/promtool.md | 4 ++-- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 064e7a04f..abac38d5f 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -131,8 +131,8 @@ func main() { checkRulesCmd := checkCmd.Command("rules", "Check if the rule files are valid or not.") ruleFiles := checkRulesCmd.Arg( "rule-files", - "The rule files to check.", - ).Required().ExistingFiles() + "The rule files to check, default is read from standard input (STDIN).", + ).Default("-").Strings() checkRulesLint := 
checkRulesCmd.Flag( "lint", "Linting checks to apply. Available options are: "+strings.Join(lintOptions, ", ")+". Use --lint=none to disable linting", @@ -715,9 +715,22 @@ func CheckRules(ls lintConfig, files ...string) int { } func checkRules(filename string, lintSettings lintConfig) (int, []error) { - fmt.Println("Checking", filename) + var rgs *rulefmt.RuleGroups + var errs []error + if filename == "-" || filename == "" { + var buf bytes.Buffer + tee := io.TeeReader(os.Stdin, &buf) + if _, err := buf.ReadFrom(tee); err != nil { + errs = append(errs, err) + return failureExitCode, errs + } + fmt.Println("Checking stdin") + rgs, errs = rulefmt.Parse(buf.Bytes()) + } else { + fmt.Println("Checking", filename) + rgs, errs = rulefmt.ParseFile(filename) + } - rgs, errs := rulefmt.ParseFile(filename) if errs != nil { return successExitCode, errs } diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 543034f15..6b67408c9 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -179,9 +179,9 @@ Check if the rule files are valid or not. ###### Arguments -| Argument | Description | Required | +| Argument | Description | Default | | --- | --- | --- | -| rule-files | The rule files to check. | Yes | +| rule-files | The rule files to check, default is read from standard input (STDIN). 
| `-` | From 2b7202c4ccf3ff8e61d4c509c6f6b4283ef1d9b5 Mon Sep 17 00:00:00 2001 From: "Xiaochao Dong (@damnever)" Date: Fri, 9 Dec 2022 15:27:56 +0800 Subject: [PATCH 054/632] Validate the metric names and labels in the remote write handler Signed-off-by: Xiaochao Dong (@damnever) --- storage/remote/write_handler.go | 28 +++++++++++++++++-- storage/remote/write_handler_test.go | 42 ++++++++++++++++++++++++---- web/api/v1/api.go | 2 +- 3 files changed, 63 insertions(+), 9 deletions(-) diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 45304c43c..590a55adb 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -22,6 +22,8 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage" @@ -30,15 +32,28 @@ import ( type writeHandler struct { logger log.Logger appendable storage.Appendable + + samplesWithInvalidLabelsTotal prometheus.Counter } // NewWriteHandler creates a http.Handler that accepts remote write requests and // writes them to the provided appendable. 
-func NewWriteHandler(logger log.Logger, appendable storage.Appendable) http.Handler { - return &writeHandler{ +func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable) http.Handler { + h := &writeHandler{ logger: logger, appendable: appendable, + + samplesWithInvalidLabelsTotal: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "remote_write_invalid_labels_samples_total", + Help: "The total number of remote write samples which contains invalid labels.", + }), } + if reg != nil { + reg.MustRegister(h.samplesWithInvalidLabelsTotal) + } + return h } func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { @@ -85,6 +100,7 @@ func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar, func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) { outOfOrderExemplarErrs := 0 + samplesWithInvalidLabels := 0 app := h.appendable.Appender(ctx) defer func() { @@ -98,6 +114,11 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err var exemplarErr error for _, ts := range req.Timeseries { labels := labelProtosToLabels(ts.Labels) + if !labels.IsValid() { + level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", labels.String()) + samplesWithInvalidLabels++ + continue + } for _, s := range ts.Samples { _, err = app.Append(0, labels, s.Timestamp, s.Value) if err != nil { @@ -150,6 +171,9 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err if outOfOrderExemplarErrs > 0 { _ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) } + if samplesWithInvalidLabels > 0 { + h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels)) + } return nil } diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index 58c4439fa..8774946ea 100644 --- 
a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -20,6 +20,8 @@ import ( "io" "net/http" "net/http/httptest" + "strconv" + "strings" "testing" "time" @@ -43,7 +45,7 @@ func TestRemoteWriteHandler(t *testing.T) { require.NoError(t, err) appendable := &mockAppendable{} - handler := NewWriteHandler(nil, appendable) + handler := NewWriteHandler(nil, nil, appendable) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -94,7 +96,7 @@ func TestOutOfOrderSample(t *testing.T) { appendable := &mockAppendable{ latestSample: 100, } - handler := NewWriteHandler(log.NewNopLogger(), appendable) + handler := NewWriteHandler(log.NewNopLogger(), nil, appendable) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -119,7 +121,7 @@ func TestOutOfOrderExemplar(t *testing.T) { appendable := &mockAppendable{ latestExemplar: 100, } - handler := NewWriteHandler(log.NewNopLogger(), appendable) + handler := NewWriteHandler(log.NewNopLogger(), nil, appendable) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -142,7 +144,7 @@ func TestOutOfOrderHistogram(t *testing.T) { appendable := &mockAppendable{ latestHistogram: 100, } - handler := NewWriteHandler(log.NewNopLogger(), appendable) + handler := NewWriteHandler(log.NewNopLogger(), nil, appendable) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -151,6 +153,34 @@ func TestOutOfOrderHistogram(t *testing.T) { require.Equal(t, http.StatusBadRequest, resp.StatusCode) } +func BenchmarkRemoteWritehandler(b *testing.B) { + const labelValue = "abcdefg'hijlmn234!@#$%^&*()_+~`\"{}[],./<>?hello0123hiOlá你好Dzieńdobry9Zd8ra765v4stvuyte" + reqs := []*http.Request{} + for i := 0; i < b.N; i++ { + num := strings.Repeat(strconv.Itoa(i), 16) + buf, _, err := buildWriteRequest([]prompb.TimeSeries{{ + Labels: []prompb.Label{ + {Name: "__name__", Value: "test_metric"}, + {Name: "test_label_name_" + num, Value: labelValue + num}, + }, + Histograms: 
[]prompb.Histogram{HistogramToHistogramProto(0, &testHistogram)}, + }}, nil, nil, nil) + require.NoError(b, err) + req, err := http.NewRequest("", "", bytes.NewReader(buf)) + require.NoError(b, err) + reqs = append(reqs, req) + } + + appendable := &mockAppendable{} + handler := NewWriteHandler(log.NewNopLogger(), nil, appendable) + recorder := httptest.NewRecorder() + + b.ResetTimer() + for _, req := range reqs { + handler.ServeHTTP(recorder, req) + } +} + func TestCommitErr(t *testing.T) { buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil, nil) require.NoError(t, err) @@ -161,7 +191,7 @@ func TestCommitErr(t *testing.T) { appendable := &mockAppendable{ commitErr: fmt.Errorf("commit error"), } - handler := NewWriteHandler(log.NewNopLogger(), appendable) + handler := NewWriteHandler(log.NewNopLogger(), nil, appendable) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -187,7 +217,7 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) { require.NoError(b, db.Close()) }) - handler := NewWriteHandler(log.NewNopLogger(), db.Head()) + handler := NewWriteHandler(log.NewNopLogger(), nil, db.Head()) buf, _, err := buildWriteRequest(genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil) require.NoError(b, err) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 3b6ade562..dcc520ab1 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -284,7 +284,7 @@ func NewAPI( } if ap != nil { - a.remoteWriteHandler = remote.NewWriteHandler(logger, ap) + a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap) } return a From 8472596fd0673b972ca8d1e957ff997fe26db401 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Wed, 5 Apr 2023 11:24:49 +0200 Subject: [PATCH 055/632] fix: apply suggested changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- cmd/promtool/main.go | 18 ++++++++++++------ 
docs/command-line/promtool.md | 6 +++--- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index abac38d5f..b3d743e44 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -132,7 +132,7 @@ func main() { ruleFiles := checkRulesCmd.Arg( "rule-files", "The rule files to check, default is read from standard input (STDIN).", - ).Default("-").Strings() + ).ExistingFiles() checkRulesLint := checkRulesCmd.Flag( "lint", "Linting checks to apply. Available options are: "+strings.Join(lintOptions, ", ")+". Use --lint=none to disable linting", @@ -690,6 +690,11 @@ func CheckRules(ls lintConfig, files ...string) int { failed := false hasErrors := false + // add empty string to avoid matching filename + if len(files) == 0 { + files = append(files, "") + } + for _, f := range files { if n, errs := checkRules(f, ls); errs != nil { fmt.Fprintln(os.Stderr, " FAILED:") @@ -717,15 +722,16 @@ func CheckRules(ls lintConfig, files ...string) int { func checkRules(filename string, lintSettings lintConfig) (int, []error) { var rgs *rulefmt.RuleGroups var errs []error - if filename == "-" || filename == "" { - var buf bytes.Buffer - tee := io.TeeReader(os.Stdin, &buf) - if _, err := buf.ReadFrom(tee); err != nil { + + // if filename is an empty string it is a stdin + if filename == "" { + data, err := io.ReadAll(os.Stdin) + if err != nil { errs = append(errs, err) return failureExitCode, errs } fmt.Println("Checking stdin") - rgs, errs = rulefmt.Parse(buf.Bytes()) + rgs, errs = rulefmt.Parse(data) } else { fmt.Println("Checking", filename) rgs, errs = rulefmt.ParseFile(filename) diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 6b67408c9..59c46dd79 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -179,9 +179,9 @@ Check if the rule files are valid or not. 
###### Arguments -| Argument | Description | Default | -| --- | --- | --- | -| rule-files | The rule files to check, default is read from standard input (STDIN). | `-` | +| Argument | Description | +| --- | --- | +| rule-files | The rule files to check, default is read from standard input (STDIN). | From 0c75f76193b71f4860e95e5b9dd108361a124380 Mon Sep 17 00:00:00 2001 From: znley Date: Thu, 6 Apr 2023 11:07:10 +0800 Subject: [PATCH 056/632] fix: RLIM_INFINITY type is uint64 on loong64 Signed-off-by: znley --- util/runtime/limits_default.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/util/runtime/limits_default.go b/util/runtime/limits_default.go index c3e0b4701..d49b3bc1e 100644 --- a/util/runtime/limits_default.go +++ b/util/runtime/limits_default.go @@ -18,16 +18,18 @@ package runtime import ( "fmt" + "math" "syscall" ) -// syscall.RLIM_INFINITY is a constant and its default type is int. -// It needs to be converted to an int64 variable to be compared with uint64 values. -// See https://golang.org/ref/spec#Conversions -var unlimited int64 = syscall.RLIM_INFINITY +// syscall.RLIM_INFINITY is a constant. +// Its type is int on most architectures but there are exceptions such as loong64. +// Uniform it to uint accorind to the standard. 
+// https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/sys_resource.h.html +var unlimited uint64 = syscall.RLIM_INFINITY & math.MaxUint64 func limitToString(v uint64, unit string) string { - if v == uint64(unlimited) { + if v == unlimited { return "unlimited" } return fmt.Sprintf("%d%s", v, unit) From 79db04eb122f9ff9d56574f8139bb797d56a8d45 Mon Sep 17 00:00:00 2001 From: Justin Lei Date: Fri, 3 Mar 2023 13:05:13 -0800 Subject: [PATCH 057/632] Adjust samplesPerChunk from 120 to 220 Signed-off-by: Justin Lei --- tsdb/head_append.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 14e343f74..967c74359 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -1324,10 +1324,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, func (s *memSeries) appendPreprocessor( t int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, ) (c *memChunk, sampleInOrder, chunkCreated bool) { - // Based on Gorilla white papers this offers near-optimal compression ratio - // so anything bigger that this has diminishing returns and increases - // the time range within which we have to decompress all samples. 
- const samplesPerChunk = 120 + const samplesPerChunk = 220 c = s.head() From c770ba804762d30c4cf9b63b092a3dfd9fed77c4 Mon Sep 17 00:00:00 2001 From: Justin Lei Date: Fri, 3 Mar 2023 13:10:24 -0800 Subject: [PATCH 058/632] Add comment linking to PR Signed-off-by: Justin Lei --- tsdb/head_append.go | 1 + 1 file changed, 1 insertion(+) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 967c74359..46180051e 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -1324,6 +1324,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, func (s *memSeries) appendPreprocessor( t int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, ) (c *memChunk, sampleInOrder, chunkCreated bool) { + // The basis for this number can be found here: https://github.com/prometheus/prometheus/pull/12055 const samplesPerChunk = 220 c = s.head() From 73ff91d182c991daff13ee945bf861c3d48a58c0 Mon Sep 17 00:00:00 2001 From: Justin Lei Date: Fri, 3 Mar 2023 14:27:13 -0800 Subject: [PATCH 059/632] Test fixes Signed-off-by: Justin Lei --- storage/remote/read_handler_test.go | 133 ++++++++++++++-------------- tsdb/db_test.go | 12 +-- tsdb/head_test.go | 24 ++--- 3 files changed, 86 insertions(+), 83 deletions(-) diff --git a/storage/remote/read_handler_test.go b/storage/remote/read_handler_test.go index 261c28e21..0c186097a 100644 --- a/storage/remote/read_handler_test.go +++ b/storage/remote/read_handler_test.go @@ -202,15 +202,18 @@ func BenchmarkStreamReadEndpoint(b *testing.B) { } func TestStreamReadEndpoint(t *testing.T) { - // First with 120 samples. We expect 1 frame with 1 chunk. - // Second with 121 samples, We expect 1 frame with 2 chunks. - // Third with 241 samples. We expect 1 frame with 2 chunks, and 1 frame with 1 chunk for the same series due to bytes limit. 
+ // Note: samplesPerChunk is set to 220, but that isn't cleanly divisible by the chunkRange of 24 hrs and 1 min + // resolution used in this test so tsdb.computeChunkEndTime will put 240 samples in each chunk. + // + // First with 239 samples; we expect 1 frame with 1 full chunk. + // Second with 241 samples; we expect 1 frame with 2 chunks. + // Third with 481 samples; we expect 1 frame with 2 chunks, and 1 frame with 1 chunk for the same series due to bytes limit. // Fourth with 120 histogram samples. We expect 1 frame with 1 chunk. suite, err := promql.NewTest(t, ` load 1m - test_metric1{foo="bar1",baz="qux"} 0+100x119 - test_metric1{foo="bar2",baz="qux"} 0+100x120 - test_metric1{foo="bar3",baz="qux"} 0+100x240 + test_metric1{foo="bar1",baz="qux"} 0+100x239 + test_metric1{foo="bar2",baz="qux"} 0+100x240 + test_metric1{foo="bar3",baz="qux"} 0+100x480 `) require.NoError(t, err) defer suite.Close() @@ -228,8 +231,8 @@ func TestStreamReadEndpoint(t *testing.T) { } }, 1e6, 1, - // Labelset has 57 bytes. Full chunk in test data has roughly 240 bytes. This allows us to have at max 2 chunks in this test. - 57+480, + // Labelset has 57 bytes. Full chunk in test data has roughly 440 bytes. This allows us to have at max 2 chunks in this test. + 57+880, ) // Encode the request. 
@@ -245,19 +248,19 @@ func TestStreamReadEndpoint(t *testing.T) { matcher4, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test_histogram_metric1") require.NoError(t, err) - query1, err := ToQuery(0, 14400001, []*labels.Matcher{matcher1, matcher2}, &storage.SelectHints{ + query1, err := ToQuery(0, 32460001, []*labels.Matcher{matcher1, matcher2}, &storage.SelectHints{ Step: 1, Func: "avg", Start: 0, - End: 14400001, + End: 32460001, }) require.NoError(t, err) - query2, err := ToQuery(0, 14400001, []*labels.Matcher{matcher1, matcher3}, &storage.SelectHints{ + query2, err := ToQuery(0, 32460001, []*labels.Matcher{matcher1, matcher3}, &storage.SelectHints{ Step: 1, Func: "avg", Start: 0, - End: 14400001, + End: 32460001, }) require.NoError(t, err) @@ -316,8 +319,8 @@ func TestStreamReadEndpoint(t *testing.T) { Chunks: []prompb.Chunk{ { Type: prompb.Chunk_XOR, - MaxTimeMs: 7140000, - Data: []byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"), + MaxTimeMs: 14340000, + Data: 
[]byte("\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xd4\x03\xc2|\x05\x94\x00\xc1\xac}\xe9z2\xd0O\xed\xb4n[\aΔ\xa3md\xf9\xd0\xfd\x1aPm\nS\x9d\x0eQ\xad\x06P\xbd\xa8\xbfʁZ\x03(3\xa0R\x87\xda\x05(\x0f\xa0ҁ\xda=(\x13\xa02\x83Z\a(w\xa02\x81Z\x0f(\x13\xb5\x97\xf4P\x1b@\xa5\a\xf4\nP\x1bC\xa5\x02t\x1eP+@e\x1e\xf4\x0ePk@e\x02t:P;A\xa5\x01\xf4\nS\xfb@\xa5\x06t\x06P+C\xe5\x02\xf4\x06Pk@\xe5\x1et\nP\x1bA\xa5\x03\xf4:P\x1b@\xa5\x06t\x0eZJ\xff\\\x85ˈ\u05f8\x0f\xe5+F[\xc8\xe7E)\xed\x14\xa1\xf6\xe2}(v\x8d(N\x83)և(ރ(V\xdaW\xf2\x82t4\xa0m\x05(\xffAJ\x06\xd0҂t\xfc\xa0\xad\x03(oA\xca:\xd02\x82t4\xa0\xed\xb0\xbfҀ\xfa\x05(=\xa0R\x87:\x03(\x15\xa0\xf2\x81z\x03(\xf5\xa0r\x83:\x05(\r\xa1ҁ\xfa\r(\r\xa0R\x9f:\a(5\xa02\x81z\x1f(\x15\xa02\x83:\x05(\xfd\xa0R\x80\xfa\r(\x1d\xa1ҁ:\x03(5\xa0r\xd6g\xf4\xb9\x8b\x97!\xabq\x1f\xcaN\xe0GJ\x1bE)\xfd\x14\xa1\xb6\xf2}(N\x8f(V\x83)އ(փ(N\xdcW҃\xb44\xa0}\x05(\xfbAJ\x19\xd02\x82\xb4\xfc\xa0\xbd\x03(kA\xca9\xd0R\x81\xb44\xa0\xfd\xb4\xbfҀ\xda\x05(3\xa0r\x87Z\x03(\x17\xa0\xf2\x81Z\x03(\xf3\xa0R\x83\xda\x05(\x0f\xa1ҁ\xda\r(\x13\xa02\x9fZ\a(7\xa02\x81Z\x1f(\x13\xa0Ҁ\xda\x05(\xff\xa0R\x80\xda\r "), }, }, }, @@ -336,61 +339,61 @@ func TestStreamReadEndpoint(t *testing.T) { Chunks: []prompb.Chunk{ { Type: prompb.Chunk_XOR, - MaxTimeMs: 7140000, - Data: []byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"), - }, - { - Type: prompb.Chunk_XOR, - MinTimeMs: 7200000, - MaxTimeMs: 7200000, - Data: 
[]byte("\000\001\200\364\356\006@\307p\000\000\000\000\000\000"), - }, - }, - }, - }, - }, - { - ChunkedSeries: []*prompb.ChunkedSeries{ - { - Labels: []prompb.Label{ - {Name: "__name__", Value: "test_metric1"}, - {Name: "b", Value: "c"}, - {Name: "baz", Value: "qux"}, - {Name: "d", Value: "e"}, - {Name: "foo", Value: "bar3"}, - }, - Chunks: []prompb.Chunk{ - { - Type: prompb.Chunk_XOR, - MaxTimeMs: 7140000, - Data: []byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"), - }, - { - Type: prompb.Chunk_XOR, - MinTimeMs: 7200000, MaxTimeMs: 14340000, - Data: []byte("\000x\200\364\356\006@\307p\000\000\000\000\000\340\324\003\340>\224\355\260\277\322\200\372\005(=\240R\207:\003(\025\240\362\201z\003(\365\240r\203:\005(\r\241\322\201\372\r(\r\240R\237:\007(5\2402\201z\037(\025\2402\203:\005(\375\240R\200\372\r(\035\241\322\201:\003(5\240r\326g\364\271\213\227!\253q\037\312N\340GJ\033E)\375\024\241\266\362}(N\217(V\203)\336\207(\326\203(N\334W\322\203\2644\240}\005(\373AJ\031\3202\202\264\374\240\275\003(kA\3129\320R\201\2644\240\375\264\277\322\200\332\005(3\240r\207Z\003(\027\240\362\201Z\003(\363\240R\203\332\005(\017\241\322\201\332\r(\023\2402\237Z\007(7\2402\201Z\037(\023\240\322\200\332\005(\377\240R\200\332\r "), + Data: 
[]byte("\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xd4\x03\xc2|\x05\x94\x00\xc1\xac}\xe9z2\xd0O\xed\xb4n[\aΔ\xa3md\xf9\xd0\xfd\x1aPm\nS\x9d\x0eQ\xad\x06P\xbd\xa8\xbfʁZ\x03(3\xa0R\x87\xda\x05(\x0f\xa0ҁ\xda=(\x13\xa02\x83Z\a(w\xa02\x81Z\x0f(\x13\xb5\x97\xf4P\x1b@\xa5\a\xf4\nP\x1bC\xa5\x02t\x1eP+@e\x1e\xf4\x0ePk@e\x02t:P;A\xa5\x01\xf4\nS\xfb@\xa5\x06t\x06P+C\xe5\x02\xf4\x06Pk@\xe5\x1et\nP\x1bA\xa5\x03\xf4:P\x1b@\xa5\x06t\x0eZJ\xff\\\x85ˈ\u05f8\x0f\xe5+F[\xc8\xe7E)\xed\x14\xa1\xf6\xe2}(v\x8d(N\x83)և(ރ(V\xdaW\xf2\x82t4\xa0m\x05(\xffAJ\x06\xd0҂t\xfc\xa0\xad\x03(oA\xca:\xd02\x82t4\xa0\xed\xb0\xbfҀ\xfa\x05(=\xa0R\x87:\x03(\x15\xa0\xf2\x81z\x03(\xf5\xa0r\x83:\x05(\r\xa1ҁ\xfa\r(\r\xa0R\x9f:\a(5\xa02\x81z\x1f(\x15\xa02\x83:\x05(\xfd\xa0R\x80\xfa\r(\x1d\xa1ҁ:\x03(5\xa0r\xd6g\xf4\xb9\x8b\x97!\xabq\x1f\xcaN\xe0GJ\x1bE)\xfd\x14\xa1\xb6\xf2}(N\x8f(V\x83)އ(փ(N\xdcW҃\xb44\xa0}\x05(\xfbAJ\x19\xd02\x82\xb4\xfc\xa0\xbd\x03(kA\xca9\xd0R\x81\xb44\xa0\xfd\xb4\xbfҀ\xda\x05(3\xa0r\x87Z\x03(\x17\xa0\xf2\x81Z\x03(\xf3\xa0R\x83\xda\x05(\x0f\xa1ҁ\xda\r(\x13\xa02\x9fZ\a(7\xa02\x81Z\x1f(\x13\xa0Ҁ\xda\x05(\xff\xa0R\x80\xda\r "), }, - }, - }, - }, - }, - { - ChunkedSeries: []*prompb.ChunkedSeries{ - { - Labels: []prompb.Label{ - {Name: "__name__", Value: "test_metric1"}, - {Name: "b", Value: "c"}, - {Name: "baz", Value: "qux"}, - {Name: "d", Value: "e"}, - {Name: "foo", Value: "bar3"}, - }, - Chunks: []prompb.Chunk{ { Type: prompb.Chunk_XOR, MinTimeMs: 14400000, MaxTimeMs: 14400000, - Data: []byte("\000\001\200\350\335\r@\327p\000\000\000\000\000\000"), + Data: []byte("\x00\x01\x80\xe8\xdd\r@\xd7p\x00\x00\x00\x00\x00\x00"), + }, + }, + }, + }, + }, + { + ChunkedSeries: []*prompb.ChunkedSeries{ + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "test_metric1"}, + {Name: "b", Value: "c"}, + {Name: "baz", Value: "qux"}, + {Name: "d", Value: "e"}, + {Name: "foo", Value: "bar3"}, + }, + Chunks: []prompb.Chunk{ + { + Type: prompb.Chunk_XOR, + MaxTimeMs: 14340000, + Data: 
[]byte("\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xd4\x03\xc2|\x05\x94\x00\xc1\xac}\xe9z2\xd0O\xed\xb4n[\aΔ\xa3md\xf9\xd0\xfd\x1aPm\nS\x9d\x0eQ\xad\x06P\xbd\xa8\xbfʁZ\x03(3\xa0R\x87\xda\x05(\x0f\xa0ҁ\xda=(\x13\xa02\x83Z\a(w\xa02\x81Z\x0f(\x13\xb5\x97\xf4P\x1b@\xa5\a\xf4\nP\x1bC\xa5\x02t\x1eP+@e\x1e\xf4\x0ePk@e\x02t:P;A\xa5\x01\xf4\nS\xfb@\xa5\x06t\x06P+C\xe5\x02\xf4\x06Pk@\xe5\x1et\nP\x1bA\xa5\x03\xf4:P\x1b@\xa5\x06t\x0eZJ\xff\\\x85ˈ\u05f8\x0f\xe5+F[\xc8\xe7E)\xed\x14\xa1\xf6\xe2}(v\x8d(N\x83)և(ރ(V\xdaW\xf2\x82t4\xa0m\x05(\xffAJ\x06\xd0҂t\xfc\xa0\xad\x03(oA\xca:\xd02\x82t4\xa0\xed\xb0\xbfҀ\xfa\x05(=\xa0R\x87:\x03(\x15\xa0\xf2\x81z\x03(\xf5\xa0r\x83:\x05(\r\xa1ҁ\xfa\r(\r\xa0R\x9f:\a(5\xa02\x81z\x1f(\x15\xa02\x83:\x05(\xfd\xa0R\x80\xfa\r(\x1d\xa1ҁ:\x03(5\xa0r\xd6g\xf4\xb9\x8b\x97!\xabq\x1f\xcaN\xe0GJ\x1bE)\xfd\x14\xa1\xb6\xf2}(N\x8f(V\x83)އ(փ(N\xdcW҃\xb44\xa0}\x05(\xfbAJ\x19\xd02\x82\xb4\xfc\xa0\xbd\x03(kA\xca9\xd0R\x81\xb44\xa0\xfd\xb4\xbfҀ\xda\x05(3\xa0r\x87Z\x03(\x17\xa0\xf2\x81Z\x03(\xf3\xa0R\x83\xda\x05(\x0f\xa1ҁ\xda\r(\x13\xa02\x9fZ\a(7\xa02\x81Z\x1f(\x13\xa0Ҁ\xda\x05(\xff\xa0R\x80\xda\r "), + }, + { + Type: prompb.Chunk_XOR, + MinTimeMs: 14400000, + MaxTimeMs: 28740000, + Data: 
[]byte("\x00\xf0\x80\xe8\xdd\r@\xd7p\x00\x00\x00\x00\x00\xe0\xd4\x03\xe0G\xca+C)\xbd\x1c\xb6\x19\xfdh\x06P\x13\xa0i@v\x83\xa5\x00\xfa\x02\x94\x0fh\nP\xf3\xa0\x19@V\x81\xe5\x01z\x01\x94\x1dh\x0eP3\xa0)@6\x8f\xa5\x01\xfa\x06\x94\x03h\nPs\xa09@րe\x01z\x1f\x94\x05h\x06P3\xa0)A\xf6\x80\xa5\x00\xfa\x06\x94\ai\xfaP\x13\xa0\x19@ր\xe5\az\x01\x94\x05h\x1eP\x13\xa1\xe9@6\x80\xa5\x03\xfa\x02\x94\x03h:P\x13\xa0y@V\x80e\x1fz\x03\x94\rh\x06P\x13\xa0\xe9@v\x81\xa5\x00\xfa\x02\x94?h\nP3\xa0\x19@V\x83\xe5\x01z\x01\x94\rh\x0eZ\x8e\xff\xad\xccjSnC\xe9O\xdcH\xe9Ch\xa53\xa3\x97\x02}h2\x85\xe8\xf2\x85h2\x9c\xe8R\x8fhR\x83\xed\xe5}(;CJ\t\xd02\x8e\xb4\x1c\xa1\xbd\x03(+O\xca\t\xd0ҁ\xb4\x14\xa3\xfd\x05(\x1bCJ\tۋ\xff(\x15\xa02\x83z\a(u\xa02\x81:\r(\x1d\xa3Ҁ\xfa\x05(=\xa0R\x87:\x03(\x15\xa0\xf2\x81z\x03)\xf5\xa0r\x83:\x05(\r\xa1ҁ\xfa\r(\r\xa0R\x8f:\a(5\xa02\x81z\x1f(\x15\xa02\x83:\x05-\xa6\x7f\xda\x02\x94\x03\xe8\x1aP\x1d\xa0\xe9@N\x80e\x03Z\x03\x94=\xe8\x06P\x15\xa0y@N\x83\xa5\x00\xda\x02\x94\x0f\xe8\nP\r\xa3\xe9@N\x81\xe5\x01Z\x01\x94\x1d\xe8\x0eP5\xa0\x19@N\x87\xa5\x01\xda\x06\x94\x03\xe8\nP}\xa0)@\u0380e\x01Z\x7f\x94\x05\xe8\x06P5\xa09A\u0380\xa5\x00\xda\x06\x94\a\xe8zP\r\xa0)@\u0380\xe5\aZ\x01\x94\x05\xe8\x1eP\x15\xa0\x19G\u0380\xa5\x03\xda\x02\x94\x03\xe8:P\x1d\xa0i"), + }, + }, + }, + }, + }, + { + ChunkedSeries: []*prompb.ChunkedSeries{ + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "test_metric1"}, + {Name: "b", Value: "c"}, + {Name: "baz", Value: "qux"}, + {Name: "d", Value: "e"}, + {Name: "foo", Value: "bar3"}, + }, + Chunks: []prompb.Chunk{ + { + Type: prompb.Chunk_XOR, + MinTimeMs: 28800000, + MaxTimeMs: 28800000, + Data: []byte("\x00\x01\x80л\x1b@\xe7p\x00\x00\x00\x00\x00\x00"), }, }, }, @@ -409,8 +412,8 @@ func TestStreamReadEndpoint(t *testing.T) { Chunks: []prompb.Chunk{ { Type: prompb.Chunk_XOR, - MaxTimeMs: 7140000, - Data: 
[]byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"), + MaxTimeMs: 14340000, + Data: []byte("\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xd4\x03\xc2|\x05\x94\x00\xc1\xac}\xe9z2\xd0O\xed\xb4n[\aΔ\xa3md\xf9\xd0\xfd\x1aPm\nS\x9d\x0eQ\xad\x06P\xbd\xa8\xbfʁZ\x03(3\xa0R\x87\xda\x05(\x0f\xa0ҁ\xda=(\x13\xa02\x83Z\a(w\xa02\x81Z\x0f(\x13\xb5\x97\xf4P\x1b@\xa5\a\xf4\nP\x1bC\xa5\x02t\x1eP+@e\x1e\xf4\x0ePk@e\x02t:P;A\xa5\x01\xf4\nS\xfb@\xa5\x06t\x06P+C\xe5\x02\xf4\x06Pk@\xe5\x1et\nP\x1bA\xa5\x03\xf4:P\x1b@\xa5\x06t\x0eZJ\xff\\\x85ˈ\u05f8\x0f\xe5+F[\xc8\xe7E)\xed\x14\xa1\xf6\xe2}(v\x8d(N\x83)և(ރ(V\xdaW\xf2\x82t4\xa0m\x05(\xffAJ\x06\xd0҂t\xfc\xa0\xad\x03(oA\xca:\xd02\x82t4\xa0\xed\xb0\xbfҀ\xfa\x05(=\xa0R\x87:\x03(\x15\xa0\xf2\x81z\x03(\xf5\xa0r\x83:\x05(\r\xa1ҁ\xfa\r(\r\xa0R\x9f:\a(5\xa02\x81z\x1f(\x15\xa02\x83:\x05(\xfd\xa0R\x80\xfa\r(\x1d\xa1ҁ:\x03(5\xa0r\xd6g\xf4\xb9\x8b\x97!\xabq\x1f\xcaN\xe0GJ\x1bE)\xfd\x14\xa1\xb6\xf2}(N\x8f(V\x83)އ(փ(N\xdcW҃\xb44\xa0}\x05(\xfbAJ\x19\xd02\x82\xb4\xfc\xa0\xbd\x03(kA\xca9\xd0R\x81\xb44\xa0\xfd\xb4\xbfҀ\xda\x05(3\xa0r\x87Z\x03(\x17\xa0\xf2\x81Z\x03(\xf3\xa0R\x83\xda\x05(\x0f\xa1ҁ\xda\r(\x13\xa02\x9fZ\a(7\xa02\x81Z\x1f(\x13\xa0Ҁ\xda\x05(\xff\xa0R\x80\xda\r "), }, }, }, diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 70639085e..14297c3dc 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -247,8 +247,8 @@ func TestNoPanicAfterWALCorruption(t *testing.T) { var 
maxt int64 ctx := context.Background() { - // Appending 121 samples because on the 121st a new chunk will be created. - for i := 0; i < 121; i++ { + // Appending 221 samples because on the 221st a new chunk will be created. + for i := 0; i < 221; i++ { app := db.Appender(ctx) _, err := app.Append(0, labels.FromStrings("foo", "bar"), maxt, 0) expSamples = append(expSamples, sample{t: maxt, v: 0}) @@ -1089,9 +1089,9 @@ func TestWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T) { numSamplesBeforeSeriesCreation = 1000 ) - // We test both with few and many samples appended after series creation. If samples are < 120 then there's no + // We test both with few and many samples appended after series creation. If samples are < 220 then there's no // mmap-ed chunk, otherwise there's at least 1 mmap-ed chunk when replaying the WAL. - for _, numSamplesAfterSeriesCreation := range []int{1, 1000} { + for _, numSamplesAfterSeriesCreation := range []int{1, 2000} { for run := 1; run <= numRuns; run++ { t.Run(fmt.Sprintf("samples after series creation = %d, run = %d", numSamplesAfterSeriesCreation, run), func(t *testing.T) { testWALReplayRaceOnSamplesLoggedBeforeSeries(t, numSamplesBeforeSeriesCreation, numSamplesAfterSeriesCreation) @@ -1160,8 +1160,8 @@ func testWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T, numSamplesBefore } require.NoError(t, chunksIt.Err()) - // We expect 1 chunk every 120 samples after series creation. - require.Equalf(t, (numSamplesAfterSeriesCreation/120)+1, actualChunks, "series: %s", set.At().Labels().String()) + // We expect 1 chunk every 220 samples after series creation. 
+ require.Equalf(t, (numSamplesAfterSeriesCreation/220)+1, actualChunks, "series: %s", set.At().Labels().String()) } require.NoError(t, set.Err()) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 0a2a2ee6f..9a7220957 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -808,7 +808,7 @@ func TestMemSeries_truncateChunks(t *testing.T) { s := newMemSeries(labels.FromStrings("a", "b"), 1, defaultIsolationDisabled) - for i := 0; i < 4000; i += 5 { + for i := 0; i < 8000; i += 5 { ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange) require.True(t, ok, "sample append failed") } @@ -825,9 +825,9 @@ func TestMemSeries_truncateChunks(t *testing.T) { require.NotNil(t, chk) require.NoError(t, err) - s.truncateChunksBefore(2000, 0) + s.truncateChunksBefore(4000, 0) - require.Equal(t, int64(2000), s.mmappedChunks[0].minTime) + require.Equal(t, int64(4000), s.mmappedChunks[0].minTime) _, _, err = s.chunk(0, chunkDiskMapper, &memChunkPool) require.Equal(t, storage.ErrNotFound, err, "first chunks not gone") require.Equal(t, countBefore/2, len(s.mmappedChunks)+1) // +1 for the head chunk. @@ -1364,9 +1364,9 @@ func TestMemSeries_append(t *testing.T) { require.Equal(t, int64(1000), s.headChunk.minTime, "wrong chunk range") require.Equal(t, int64(1001), s.headChunk.maxTime, "wrong chunk range") - // Fill the range [1000,2000) with many samples. Intermediate chunks should be cut - // at approximately 120 samples per chunk. - for i := 1; i < 1000; i++ { + // Fill the range [1000,3000) with many samples. Intermediate chunks should be cut + // at approximately 220 samples per chunk. 
+ for i := 1; i < 2000; i++ { ok, _ := s.append(1001+int64(i), float64(i), 0, chunkDiskMapper, chunkRange) require.True(t, ok, "append failed") } @@ -1437,7 +1437,7 @@ func TestMemSeries_appendHistogram(t *testing.T) { } func TestMemSeries_append_atVariableRate(t *testing.T) { - const samplesPerChunk = 120 + const samplesPerChunk = 220 dir := t.TempDir() // This is usually taken from the Head, but passing manually here. chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize) @@ -2983,7 +2983,7 @@ func TestAppendHistogram(t *testing.T) { } func TestHistogramInWALAndMmapChunk(t *testing.T) { - head, _ := newTestHead(t, 3000, false, false) + head, _ := newTestHead(t, 6000, false, false) t.Cleanup(func() { require.NoError(t, head.Close()) }) @@ -2992,7 +2992,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { // Series with only histograms. s1 := labels.FromStrings("a", "b1") k1 := s1.String() - numHistograms := 300 + numHistograms := 600 exp := map[string][]tsdbutil.Sample{} ts := int64(0) var app storage.Appender @@ -3728,7 +3728,7 @@ func TestHistogramCounterResetHeader(t *testing.T) { checkExpCounterResetHeader(chunkenc.CounterReset) // Add 2 non-counter reset histograms. - for i := 0; i < 250; i++ { + for i := 0; i < 500; i++ { appendHistogram(h) } checkExpCounterResetHeader(chunkenc.NotCounterReset, chunkenc.NotCounterReset) @@ -3756,7 +3756,7 @@ func TestHistogramCounterResetHeader(t *testing.T) { checkExpCounterResetHeader(chunkenc.CounterReset) // Add 2 non-counter reset histograms. Just to have some non-counter reset chunks in between. 
- for i := 0; i < 250; i++ { + for i := 0; i < 500; i++ { appendHistogram(h) } checkExpCounterResetHeader(chunkenc.NotCounterReset, chunkenc.NotCounterReset) @@ -4223,7 +4223,7 @@ func TestHeadInit_DiscardChunksWithUnsupportedEncoding(t *testing.T) { h.chunkDiskMapper.WriteChunk(chunks.HeadSeriesRef(seriesRef), 500, 600, uc, false, func(err error) { require.NoError(t, err) }) app = h.Appender(ctx) - for i := 700; i < 1200; i++ { + for i := 700; i < 1700; i++ { _, err := app.Append(0, seriesLabels, int64(i), float64(i)) require.NoError(t, err) } From 83f43982c93cc776d7ef4b74c782f8f1eae72a37 Mon Sep 17 00:00:00 2001 From: Justin Lei Date: Mon, 27 Mar 2023 17:02:20 -0700 Subject: [PATCH 060/632] Add support for native histograms to concreteSeriesIterator Signed-off-by: Justin Lei --- storage/remote/codec.go | 196 +++++++++++++++++++++------ storage/remote/codec_test.go | 176 ++++++++++++++++++++++-- storage/remote/write_handler.go | 2 +- storage/remote/write_handler_test.go | 2 +- tsdb/chunkenc/chunk.go | 2 +- 5 files changed, 326 insertions(+), 52 deletions(-) diff --git a/storage/remote/codec.go b/storage/remote/codec.go index ba802bccb..78534ed93 100644 --- a/storage/remote/codec.go +++ b/storage/remote/codec.go @@ -17,6 +17,7 @@ import ( "errors" "fmt" "io" + "math" "net/http" "sort" "strings" @@ -173,7 +174,7 @@ func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet return errSeriesSet{err: err} } lbls := labelProtosToLabels(ts.Labels) - series = append(series, &concreteSeries{labels: lbls, samples: ts.Samples}) + series = append(series, &concreteSeries{labels: lbls, floats: ts.Samples, histograms: ts.Histograms}) } if sortSeries { @@ -359,8 +360,9 @@ func (c *concreteSeriesSet) Warnings() storage.Warnings { return nil } // concreteSeries implements storage.Series. 
type concreteSeries struct { - labels labels.Labels - samples []prompb.Sample + labels labels.Labels + floats []prompb.Sample + histograms []prompb.Histogram } func (c *concreteSeries) Labels() labels.Labels { @@ -372,84 +374,168 @@ func (c *concreteSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { csi.reset(c) return csi } - return newConcreteSeriersIterator(c) + return newConcreteSeriesIterator(c) } // concreteSeriesIterator implements storage.SeriesIterator. type concreteSeriesIterator struct { - cur int - series *concreteSeries + floatsCur int + histogramsCur int + curValType chunkenc.ValueType + series *concreteSeries } -func newConcreteSeriersIterator(series *concreteSeries) chunkenc.Iterator { +func newConcreteSeriesIterator(series *concreteSeries) chunkenc.Iterator { return &concreteSeriesIterator{ - cur: -1, - series: series, + floatsCur: -1, + histogramsCur: -1, + curValType: chunkenc.ValNone, + series: series, } } func (c *concreteSeriesIterator) reset(series *concreteSeries) { - c.cur = -1 + c.floatsCur = -1 + c.histogramsCur = -1 + c.curValType = chunkenc.ValNone c.series = series } // Seek implements storage.SeriesIterator. func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType { - if c.cur == -1 { - c.cur = 0 + if c.floatsCur == -1 { + c.floatsCur = 0 } - if c.cur >= len(c.series.samples) { + if c.histogramsCur == -1 { + c.histogramsCur = 0 + } + if c.floatsCur >= len(c.series.floats) && c.histogramsCur >= len(c.series.histograms) { return chunkenc.ValNone } + // No-op check. - if s := c.series.samples[c.cur]; s.Timestamp >= t { - return chunkenc.ValFloat + if (c.curValType == chunkenc.ValFloat && c.series.floats[c.floatsCur].Timestamp >= t) || + ((c.curValType == chunkenc.ValHistogram || c.curValType == chunkenc.ValFloatHistogram) && c.series.histograms[c.histogramsCur].Timestamp >= t) { + return c.curValType } - // Do binary search between current position and end. 
- c.cur += sort.Search(len(c.series.samples)-c.cur, func(n int) bool { - return c.series.samples[n+c.cur].Timestamp >= t + + c.curValType = chunkenc.ValNone + + // Binary search between current position and end for both float and histograms samples. + c.floatsCur += sort.Search(len(c.series.floats)-c.floatsCur, func(n int) bool { + return c.series.floats[n+c.floatsCur].Timestamp >= t }) - if c.cur < len(c.series.samples) { - return chunkenc.ValFloat + c.histogramsCur += sort.Search(len(c.series.histograms)-c.histogramsCur, func(n int) bool { + return c.series.histograms[n+c.histogramsCur].Timestamp >= t + }) + + if c.floatsCur < len(c.series.floats) && c.histogramsCur < len(c.series.histograms) { + // If float samples and histogram samples have overlapping timestamps prefer the float samples. + if c.series.floats[c.floatsCur].Timestamp <= c.series.histograms[c.histogramsCur].Timestamp { + c.curValType = chunkenc.ValFloat + } else { + c.curValType = getHistogramValType(&c.series.histograms[c.histogramsCur]) + } + // When the timestamps do not overlap the cursor for the non-selected sample type has advanced too + // far; we decrement it back down here. + if c.series.floats[c.floatsCur].Timestamp != c.series.histograms[c.histogramsCur].Timestamp { + if c.curValType == chunkenc.ValFloat { + c.histogramsCur-- + } else { + c.floatsCur-- + } + } + } else if c.floatsCur < len(c.series.floats) { + c.curValType = chunkenc.ValFloat + } else if c.histogramsCur < len(c.series.histograms) { + c.curValType = getHistogramValType(&c.series.histograms[c.histogramsCur]) } - return chunkenc.ValNone - // TODO(beorn7): Add histogram support. + + return c.curValType +} + +func getHistogramValType(h *prompb.Histogram) chunkenc.ValueType { + _, isInt := h.GetCount().(*prompb.Histogram_CountInt) + if isInt { + return chunkenc.ValHistogram + } + return chunkenc.ValFloatHistogram } // At implements chunkenc.Iterator. 
func (c *concreteSeriesIterator) At() (t int64, v float64) { - s := c.series.samples[c.cur] + if c.curValType != chunkenc.ValFloat { + panic("iterator is not on a float sample") + } + s := c.series.floats[c.floatsCur] return s.Timestamp, s.Value } -// AtHistogram always returns (0, nil) because there is no support for histogram -// values yet. -// TODO(beorn7): Fix that for histogram support in remote storage. +// AtHistogram implements chunkenc.Iterator func (c *concreteSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { - return 0, nil + if c.curValType != chunkenc.ValHistogram { + panic("iterator is not on an integer histogram sample") + } + h := c.series.histograms[c.histogramsCur] + return h.Timestamp, HistogramProtoToHistogram(h) } -// AtFloatHistogram always returns (0, nil) because there is no support for histogram -// values yet. -// TODO(beorn7): Fix that for histogram support in remote storage. +// AtFloatHistogram implements chunkenc.Iterator func (c *concreteSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { - return 0, nil + switch c.curValType { + case chunkenc.ValHistogram: + fh := c.series.histograms[c.histogramsCur] + return fh.Timestamp, HistogramProtoToFloatHistogram(fh) + case chunkenc.ValFloatHistogram: + fh := c.series.histograms[c.histogramsCur] + return fh.Timestamp, FloatHistogramProtoToFloatHistogram(fh) + default: + panic("iterator is not on a histogram sample") + } } // AtT implements chunkenc.Iterator. func (c *concreteSeriesIterator) AtT() int64 { - s := c.series.samples[c.cur] - return s.Timestamp + if c.curValType == chunkenc.ValHistogram || c.curValType == chunkenc.ValFloatHistogram { + return c.series.histograms[c.histogramsCur].Timestamp + } + return c.series.floats[c.floatsCur].Timestamp } +const noTS = int64(math.MaxInt64) + // Next implements chunkenc.Iterator. 
func (c *concreteSeriesIterator) Next() chunkenc.ValueType { - c.cur++ - if c.cur < len(c.series.samples) { - return chunkenc.ValFloat + peekFloatTS := noTS + if c.floatsCur+1 < len(c.series.floats) { + peekFloatTS = c.series.floats[c.floatsCur+1].Timestamp } - return chunkenc.ValNone - // TODO(beorn7): Add histogram support. + peekHistTS := noTS + if c.histogramsCur+1 < len(c.series.histograms) { + peekHistTS = c.series.histograms[c.histogramsCur+1].Timestamp + } + c.curValType = chunkenc.ValNone + + if peekFloatTS < peekHistTS { + c.floatsCur++ + c.curValType = chunkenc.ValFloat + } else if peekHistTS < peekFloatTS { + c.histogramsCur++ + c.curValType = chunkenc.ValHistogram + } else if peekFloatTS == noTS && peekHistTS == noTS { + // This only happens when the iterator is exhausted; we set the cursors off the end to prevent + // Seek() from returning anything afterwards. + c.floatsCur = len(c.series.floats) + c.histogramsCur = len(c.series.histograms) + } else { + // Prefer float samples to histogram samples if there's a conflict. We advance the cursor for histograms + // anyway otherwise the histogram sample will get selected on the next call to Next(). + c.floatsCur++ + c.histogramsCur++ + c.curValType = chunkenc.ValFloat + } + + return c.curValType } // Err implements chunkenc.Iterator. @@ -557,10 +643,10 @@ func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram { } } -// HistogramProtoToFloatHistogram extracts a (normal integer) Histogram from the +// FloatHistogramProtoToFloatHistogram extracts a float Histogram from the // provided proto message to a Float Histogram. The caller has to make sure that -// the proto message represents an float histogram and not a integer histogram. -func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram { +// the proto message represents a float histogram and not an integer histogram. 
+func FloatHistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram { return &histogram.FloatHistogram{ CounterResetHint: histogram.CounterResetHint(hp.ResetHint), Schema: hp.Schema, @@ -575,6 +661,24 @@ func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogr } } +// HistogramProtoToFloatHistogram extracts and converts a (normal integer) histogram from the provided proto message +// to a float histogram. The caller has to make sure that the proto message represents an integer histogram and not a +// float histogram. +func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram { + return &histogram.FloatHistogram{ + CounterResetHint: histogram.CounterResetHint(hp.ResetHint), + Schema: hp.Schema, + ZeroThreshold: hp.ZeroThreshold, + ZeroCount: float64(hp.GetZeroCountInt()), + Count: float64(hp.GetCountInt()), + Sum: hp.Sum, + PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()), + PositiveBuckets: deltasToCounts(hp.GetPositiveDeltas()), + NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()), + NegativeBuckets: deltasToCounts(hp.GetNegativeDeltas()), + } +} + func spansProtoToSpans(s []prompb.BucketSpan) []histogram.Span { spans := make([]histogram.Span, len(s)) for i := 0; i < len(s); i++ { @@ -584,6 +688,16 @@ func spansProtoToSpans(s []prompb.BucketSpan) []histogram.Span { return spans } +func deltasToCounts(deltas []int64) []float64 { + counts := make([]float64, len(deltas)) + var cur float64 + for i, d := range deltas { + cur += float64(d) + counts[i] = cur + } + return counts +} + func HistogramToHistogramProto(timestamp int64, h *histogram.Histogram) prompb.Histogram { return prompb.Histogram{ Count: &prompb.Histogram_CountInt{CountInt: h.Count}, diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go index 4137f91aa..36d0d7c31 100644 --- a/storage/remote/codec_test.go +++ b/storage/remote/codec_test.go @@ -29,6 +29,7 @@ import ( 
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/tsdbutil" ) var testHistogram = histogram.Histogram{ @@ -174,12 +175,12 @@ func TestValidateLabelsAndMetricName(t *testing.T) { func TestConcreteSeriesSet(t *testing.T) { series1 := &concreteSeries{ - labels: labels.FromStrings("foo", "bar"), - samples: []prompb.Sample{{Value: 1, Timestamp: 2}}, + labels: labels.FromStrings("foo", "bar"), + floats: []prompb.Sample{{Value: 1, Timestamp: 2}}, } series2 := &concreteSeries{ - labels: labels.FromStrings("foo", "baz"), - samples: []prompb.Sample{{Value: 3, Timestamp: 4}}, + labels: labels.FromStrings("foo", "baz"), + floats: []prompb.Sample{{Value: 3, Timestamp: 4}}, } c := &concreteSeriesSet{ series: []storage.Series{series1, series2}, @@ -206,10 +207,10 @@ func TestConcreteSeriesClonesLabels(t *testing.T) { require.Equal(t, lbls, gotLabels) } -func TestConcreteSeriesIterator(t *testing.T) { +func TestConcreteSeriesIterator_FloatSamples(t *testing.T) { series := &concreteSeries{ labels: labels.FromStrings("foo", "bar"), - samples: []prompb.Sample{ + floats: []prompb.Sample{ {Value: 1, Timestamp: 1}, {Value: 1.5, Timestamp: 1}, {Value: 2, Timestamp: 2}, @@ -255,6 +256,165 @@ func TestConcreteSeriesIterator(t *testing.T) { require.Equal(t, chunkenc.ValNone, it.Seek(2)) } +func TestConcreteSeriesIterator_HistogramSamples(t *testing.T) { + histograms := tsdbutil.GenerateTestHistograms(5) + histProtos := make([]prompb.Histogram, len(histograms)) + for i, h := range histograms { + // Results in ts sequence of 1, 1, 2, 3, 4. + var ts int64 + if i == 0 { + ts = 1 + } else { + ts = int64(i) + } + histProtos[i] = HistogramToHistogramProto(ts, h) + } + series := &concreteSeries{ + labels: labels.FromStrings("foo", "bar"), + histograms: histProtos, + } + it := series.Iterator(nil) + + // Seek to the first sample with ts=1. 
+ require.Equal(t, chunkenc.ValHistogram, it.Seek(1)) + ts, v := it.AtHistogram() + require.Equal(t, int64(1), ts) + require.Equal(t, histograms[0], v) + + // Seek one further, next sample still has ts=1. + require.Equal(t, chunkenc.ValHistogram, it.Next()) + ts, v = it.AtHistogram() + require.Equal(t, int64(1), ts) + require.Equal(t, histograms[1], v) + + // Seek again to 1 and make sure we stay where we are. + require.Equal(t, chunkenc.ValHistogram, it.Seek(1)) + ts, v = it.AtHistogram() + require.Equal(t, int64(1), ts) + require.Equal(t, histograms[1], v) + + // Another seek. + require.Equal(t, chunkenc.ValHistogram, it.Seek(3)) + ts, v = it.AtHistogram() + require.Equal(t, int64(3), ts) + require.Equal(t, histograms[3], v) + + // And we don't go back. + require.Equal(t, chunkenc.ValHistogram, it.Seek(2)) + ts, v = it.AtHistogram() + require.Equal(t, int64(3), ts) + require.Equal(t, histograms[3], v) + + // Seek beyond the end. + require.Equal(t, chunkenc.ValNone, it.Seek(5)) + // And we don't go back. (This exposes issue #10027.) + require.Equal(t, chunkenc.ValNone, it.Seek(2)) +} + +func TestConcreteSeriesIterator_FloatAndHistogramSamples(t *testing.T) { + // Series starts as histograms, then transitions to floats at ts=8 (with an overlap from ts=8 to ts=10), then + // transitions back to histograms at ts=16. 
+ histograms := tsdbutil.GenerateTestHistograms(15) + histProtos := make([]prompb.Histogram, len(histograms)) + for i, h := range histograms { + if i < 10 { + histProtos[i] = HistogramToHistogramProto(int64(i+1), h) + } else { + histProtos[i] = HistogramToHistogramProto(int64(i+6), h) + } + } + series := &concreteSeries{ + labels: labels.FromStrings("foo", "bar"), + floats: []prompb.Sample{ + {Value: 1, Timestamp: 8}, + {Value: 2, Timestamp: 9}, + {Value: 3, Timestamp: 10}, + {Value: 4, Timestamp: 11}, + {Value: 5, Timestamp: 12}, + {Value: 6, Timestamp: 13}, + {Value: 7, Timestamp: 14}, + {Value: 8, Timestamp: 15}, + }, + histograms: histProtos, + } + it := series.Iterator(nil) + + var ( + ts int64 + v float64 + h *histogram.Histogram + fh *histogram.FloatHistogram + ) + require.Equal(t, chunkenc.ValHistogram, it.Next()) + ts, h = it.AtHistogram() + require.Equal(t, int64(1), ts) + require.Equal(t, histograms[0], h) + + require.Equal(t, chunkenc.ValHistogram, it.Next()) + ts, h = it.AtHistogram() + require.Equal(t, int64(2), ts) + require.Equal(t, histograms[1], h) + + // Seek to the first float/histogram sample overlap at ts=8 (should prefer the float sample). + require.Equal(t, chunkenc.ValFloat, it.Seek(8)) + ts, v = it.At() + require.Equal(t, int64(8), ts) + require.Equal(t, 1., v) + + // Attempting to seek backwards should do nothing. + require.Equal(t, chunkenc.ValFloat, it.Seek(1)) + ts, v = it.At() + require.Equal(t, int64(8), ts) + require.Equal(t, 1., v) + + // Seeking to 8 again should also do nothing. + require.Equal(t, chunkenc.ValFloat, it.Seek(8)) + ts, v = it.At() + require.Equal(t, int64(8), ts) + require.Equal(t, 1., v) + + // Again, should prefer the float sample. + require.Equal(t, chunkenc.ValFloat, it.Next()) + ts, v = it.At() + require.Equal(t, int64(9), ts) + require.Equal(t, 2., v) + + // Seek to ts=11 where there are only float samples. 
+	require.Equal(t, chunkenc.ValFloat, it.Seek(11))
+	ts, v = it.At()
+	require.Equal(t, int64(11), ts)
+	require.Equal(t, 4., v)
+
+	// Seek to ts=15 right before the transition back to histogram samples.
+	require.Equal(t, chunkenc.ValFloat, it.Seek(15))
+	ts, v = it.At()
+	require.Equal(t, int64(15), ts)
+	require.Equal(t, 8., v)
+
+	require.Equal(t, chunkenc.ValHistogram, it.Next())
+	ts, h = it.AtHistogram()
+	require.Equal(t, int64(16), ts)
+	require.Equal(t, histograms[10], h)
+
+	// Getting a float histogram from an int histogram works.
+	require.Equal(t, chunkenc.ValHistogram, it.Next())
+	ts, fh = it.AtFloatHistogram()
+	require.Equal(t, int64(17), ts)
+	expected := HistogramProtoToFloatHistogram(HistogramToHistogramProto(int64(17), histograms[11]))
+	require.Equal(t, expected, fh)
+
+	// Keep calling Next() until the end.
+	for i := 0; i < 3; i++ {
+		require.Equal(t, chunkenc.ValHistogram, it.Next())
+	}
+
+	// The iterator is exhausted.
+	require.Equal(t, chunkenc.ValNone, it.Next())
+	require.Equal(t, chunkenc.ValNone, it.Next())
+	// Should also not be able to seek backwards again.
+	require.Equal(t, chunkenc.ValNone, it.Seek(1))
+}
+
 func TestFromQueryResultWithDuplicates(t *testing.T) {
 	ts1 := prompb.TimeSeries{
 		Labels: []prompb.Label{
@@ -368,7 +528,7 @@ func TestNilHistogramProto(t *testing.T) {
 	// This function will panic if it improperly handles nil
 	// values, causing the test to fail.
HistogramProtoToHistogram(prompb.Histogram{}) - HistogramProtoToFloatHistogram(prompb.Histogram{}) + FloatHistogramProtoToFloatHistogram(prompb.Histogram{}) } func exampleHistogram() histogram.Histogram { @@ -563,7 +723,7 @@ func TestFloatHistogramToProtoConvert(t *testing.T) { require.Equal(t, p, FloatHistogramToHistogramProto(1337, &h)) - require.Equal(t, h, *HistogramProtoToFloatHistogram(p)) + require.Equal(t, h, *FloatHistogramProtoToFloatHistogram(p)) } } diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 45304c43c..5aa113088 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -126,7 +126,7 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err for _, hp := range ts.Histograms { if hp.GetCountFloat() > 0 || hp.GetZeroCountFloat() > 0 { // It is a float histogram. - fhs := HistogramProtoToFloatHistogram(hp) + fhs := FloatHistogramProtoToFloatHistogram(hp) _, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, fhs) } else { hs := HistogramProtoToHistogram(hp) diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index 58c4439fa..9c787f17e 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -69,7 +69,7 @@ func TestRemoteWriteHandler(t *testing.T) { for _, hp := range ts.Histograms { if hp.GetCountFloat() > 0 || hp.GetZeroCountFloat() > 0 { // It is a float histogram. - fh := HistogramProtoToFloatHistogram(hp) + fh := FloatHistogramProtoToFloatHistogram(hp) require.Equal(t, mockHistogram{labels, hp.Timestamp, nil, fh}, appendable.histograms[k]) } else { h := HistogramProtoToHistogram(hp) diff --git a/tsdb/chunkenc/chunk.go b/tsdb/chunkenc/chunk.go index c550cbc78..1ebef3eb1 100644 --- a/tsdb/chunkenc/chunk.go +++ b/tsdb/chunkenc/chunk.go @@ -96,7 +96,7 @@ type Iterator interface { // timestamp equal or greater than t. 
If the current sample found by a // previous `Next` or `Seek` operation already has this property, Seek // has no effect. If a sample has been found, Seek returns the type of - // its value. Otherwise, it returns ValNone, after with the iterator is + // its value. Otherwise, it returns ValNone, after which the iterator is // exhausted. Seek(t int64) ValueType // At returns the current timestamp/value pair if the value is a float. From f90013a5a0a0eb55ace4bd9f994769aceef2a0fc Mon Sep 17 00:00:00 2001 From: Justin Lei <97976793+leizor@users.noreply.github.com> Date: Wed, 5 Apr 2023 16:09:09 -0700 Subject: [PATCH 061/632] Update storage/remote/codec.go MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Björn Rabenstein Signed-off-by: Justin Lei <97976793+leizor@users.noreply.github.com> --- storage/remote/codec.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/storage/remote/codec.go b/storage/remote/codec.go index 78534ed93..bfbd08d24 100644 --- a/storage/remote/codec.go +++ b/storage/remote/codec.go @@ -455,8 +455,7 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType { } func getHistogramValType(h *prompb.Histogram) chunkenc.ValueType { - _, isInt := h.GetCount().(*prompb.Histogram_CountInt) - if isInt { + if _, isInt := h.GetCount().(*prompb.Histogram_CountInt); isInt { return chunkenc.ValHistogram } return chunkenc.ValFloatHistogram From cca7178a120d40740a9957cc0567d081643ad1bc Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Fri, 7 Apr 2023 18:03:19 +0200 Subject: [PATCH 062/632] tsdb: Improve a couple of histogram documentation comments Signed-off-by: Arve Knudsen --- tsdb/head_append.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 14e343f74..6bc91ae06 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -430,7 +430,7 @@ func (s *memSeries) appendable(t int64, v float64, headMaxt, 
minValidTime, oooTi return false, headMaxt - t, storage.ErrOutOfOrderSample } -// appendableHistogram checks whether the given sample is valid for appending to the series. +// appendableHistogram checks whether the given histogram is valid for appending to the series. func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram) error { c := s.head() if c == nil { @@ -452,7 +452,7 @@ func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram) error { return nil } -// appendableFloatHistogram checks whether the given sample is valid for appending to the series. +// appendableFloatHistogram checks whether the given float histogram is valid for appending to the series. func (s *memSeries) appendableFloatHistogram(t int64, fh *histogram.FloatHistogram) error { c := s.head() if c == nil { From d5b33e1419b0fdc70719a45b747e155fa1a0781c Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Sat, 8 Apr 2023 10:26:06 +0200 Subject: [PATCH 063/632] =?UTF-8?q?yamllint:=20don=E2=80=99t=20check=20key?= =?UTF-8?q?s=20and=20add=20file=20extension=20to=20config=20file?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Matthieu MOREL Signed-off-by: Matthieu MOREL --- .yamllint => .yamllint.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) rename .yamllint => .yamllint.yml (90%) diff --git a/.yamllint b/.yamllint.yml similarity index 90% rename from .yamllint rename to .yamllint.yml index 19552574b..955a5a627 100644 --- a/.yamllint +++ b/.yamllint.yml @@ -20,5 +20,4 @@ rules: config/testdata/section_key_dup.bad.yml line-length: disable truthy: - ignore: | - .github/workflows/*.yml + check-keys: false From fb67d368a218d7279fe63d791f48a33e88113ad7 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Tue, 11 Apr 2023 13:45:34 -0700 Subject: [PATCH 064/632] use consistent error for instant and range query 400 Signed-off-by: Ben Ye --- web/api/v1/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/web/api/v1/api.go b/web/api/v1/api.go index 3b6ade562..a210e0001 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -522,7 +522,7 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) { } qry, err := api.QueryEngine.NewRangeQuery(api.Queryable, opts, r.FormValue("query"), start, end, step) if err != nil { - return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} + return invalidParamError(err, "query") } // From now on, we must only return with a finalizer in the result (to // be called by the caller) or call qry.Close ourselves (which is From b6573353c1495da061c25e6a08859fd3ffcd47a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Mierzwa?= Date: Wed, 12 Apr 2023 14:05:06 +0100 Subject: [PATCH 065/632] Add query_samples_total metric MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit query_samples_total is a counter that tracks the total number of samples loaded by all queries. The goal with this metric is to be able to see the amount of 'work' done by Prometheus to service queries. At the moment we have metrics with the number of queries, plus more detailed metrics showing how much time each step of a query takes. While those metrics do help they don't show us the whole picture. Queries that do load more samples are (in general) more expensive than queries that do load fewer samples. This means that looking only at the number of queries doesn't tell us how much 'work' Prometheus received. Adding a counter that tracks the total number of samples loaded allows us to see if there was a spike in the cost of queries, not just the number of them. 
Signed-off-by: Łukasz Mierzwa --- promql/engine.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/promql/engine.go b/promql/engine.go index 52278c7a2..60a575508 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -70,6 +70,7 @@ type engineMetrics struct { queryPrepareTime prometheus.Observer queryInnerEval prometheus.Observer queryResultSort prometheus.Observer + querySamples prometheus.Counter } // convertibleToInt64 returns true if v does not over-/underflow an int64. @@ -332,6 +333,12 @@ func NewEngine(opts EngineOpts) *Engine { Name: "queries_concurrent_max", Help: "The max number of concurrent queries.", }), + querySamples: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "query_samples_total", + Help: "The total number of samples loaded by all queries.", + }), queryQueueTime: queryResultSummary.WithLabelValues("queue_time"), queryPrepareTime: queryResultSummary.WithLabelValues("prepare_time"), queryInnerEval: queryResultSummary.WithLabelValues("inner_eval"), @@ -357,6 +364,7 @@ func NewEngine(opts EngineOpts) *Engine { metrics.maxConcurrentQueries, metrics.queryLogEnabled, metrics.queryLogFailures, + metrics.querySamples, queryResultSummary, ) } @@ -537,7 +545,10 @@ func (ng *Engine) newTestQuery(f func(context.Context) error) Query { // statements are not handled by the Engine. 
func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storage.Warnings, err error) { ng.metrics.currentQueries.Inc() - defer ng.metrics.currentQueries.Dec() + defer func() { + ng.metrics.currentQueries.Dec() + ng.metrics.querySamples.Add(float64(q.sampleStats.TotalSamples)) + }() ctx, cancel := context.WithTimeout(ctx, ng.timeout) q.cancel = cancel From 01d0dda4fc4c8e57636dbce7021307377340217f Mon Sep 17 00:00:00 2001 From: Alex Le Date: Wed, 12 Apr 2023 14:18:20 -0700 Subject: [PATCH 066/632] Rename PopulateBlockFunc to BlockPopulator Signed-off-by: Alex Le --- tsdb/compact.go | 18 +++++++++--------- tsdb/compact_test.go | 6 +++--- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/tsdb/compact.go b/tsdb/compact.go index b2d412375..fef1c646d 100644 --- a/tsdb/compact.go +++ b/tsdb/compact.go @@ -392,10 +392,10 @@ func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta { // Compact creates a new block in the compactor's directory from the blocks in the // provided directories. func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (uid ulid.ULID, err error) { - return c.CompactWithPopulateBlockFunc(dest, dirs, open, DefaultPopulateBlockFunc{}) + return c.CompactWithBlockPopulator(dest, dirs, open, DefaultBlockPopulator{}) } -func (c *LeveledCompactor) CompactWithPopulateBlockFunc(dest string, dirs []string, open []*Block, populateBlockFunc PopulateBlockFunc) (uid ulid.ULID, err error) { +func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, open []*Block, blockPopulator BlockPopulator) (uid ulid.ULID, err error) { var ( blocks []BlockReader bs []*Block @@ -439,7 +439,7 @@ func (c *LeveledCompactor) CompactWithPopulateBlockFunc(dest string, dirs []stri uid = ulid.MustNew(ulid.Now(), rand.Reader) meta := CompactBlockMetas(uid, metas...) - err = c.write(dest, meta, populateBlockFunc, blocks...) + err = c.write(dest, meta, blockPopulator, blocks...) 
if err == nil { if meta.Stats.NumSamples == 0 { for _, b := range bs { @@ -505,7 +505,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, p } } - err := c.write(dest, meta, DefaultPopulateBlockFunc{}, b) + err := c.write(dest, meta, DefaultBlockPopulator{}, b) if err != nil { return uid, err } @@ -550,7 +550,7 @@ func (w *instrumentedChunkWriter) WriteChunks(chunks ...chunks.Meta) error { } // write creates a new block that is the union of the provided blocks into dir. -func (c *LeveledCompactor) write(dest string, meta *BlockMeta, populateBlockFunc PopulateBlockFunc, blocks ...BlockReader) (err error) { +func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator BlockPopulator, blocks ...BlockReader) (err error) { dir := filepath.Join(dest, meta.ULID.String()) tmp := dir + tmpForCreationBlockDirSuffix var closers []io.Closer @@ -598,7 +598,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, populateBlockFunc } closers = append(closers, indexw) - if err := populateBlockFunc.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, indexw, chunkw); err != nil { + if err := blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, indexw, chunkw); err != nil { return errors.Wrap(err, "populate block") } @@ -663,16 +663,16 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, populateBlockFunc return nil } -type PopulateBlockFunc interface { +type BlockPopulator interface { PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) error } -type DefaultPopulateBlockFunc struct{} +type DefaultBlockPopulator struct{} // PopulateBlock fills the index and chunk writers with new data gathered as the union // of the provided blocks. 
It returns meta information for the new block. // It expects sorted blocks input by mint. -func (c DefaultPopulateBlockFunc) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) (err error) { +func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) (err error) { if len(blocks) == 0 { return errors.New("cannot populate block from no readers") } diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index 1b0684521..20ebf40df 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -441,7 +441,7 @@ func TestCompactionFailWillCleanUpTempDir(t *testing.T) { tmpdir := t.TempDir() - require.Error(t, compactor.write(tmpdir, &BlockMeta{}, DefaultPopulateBlockFunc{}, erringBReader{})) + require.Error(t, compactor.write(tmpdir, &BlockMeta{}, DefaultBlockPopulator{}, erringBReader{})) _, err = os.Stat(filepath.Join(tmpdir, BlockMeta{}.ULID.String()) + tmpForCreationBlockDirSuffix) require.True(t, os.IsNotExist(err), "directory is not cleaned up") } @@ -953,8 +953,8 @@ func TestCompaction_populateBlock(t *testing.T) { } iw := &mockIndexWriter{} - populateBlockFunc := DefaultPopulateBlockFunc{} - err = populateBlockFunc.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, iw, nopChunkWriter{}) + blockPopulator := DefaultBlockPopulator{} + err = blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, iw, nopChunkWriter{}) if tc.expErr != nil { require.Error(t, err) require.Equal(t, tc.expErr.Error(), err.Error()) From 10cc60af013c5cfc33f4cc377c423e8120030276 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 13 Apr 
2023 11:07:54 +0000 Subject: [PATCH 067/632] labels: add ScratchBuilder.Overwrite for slice implementation This is a method used by some downstream projects; it was created to optimize the implementation in `labels_string.go` but we should have one for both implementations so the same code works with either. Signed-off-by: Bryan Boreham --- model/labels/labels.go | 6 ++++++ model/labels/labels_string.go | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/model/labels/labels.go b/model/labels/labels.go index e3a478a25..b7398d17f 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -631,3 +631,9 @@ func (b *ScratchBuilder) Labels() Labels { // Copy the slice, so the next use of ScratchBuilder doesn't overwrite. return append([]Label{}, b.add...) } + +// Write the newly-built Labels out to ls. +// Callers must ensure that there are no other references to ls, or any strings fetched from it. +func (b *ScratchBuilder) Overwrite(ls *Labels) { + *ls = append((*ls)[:0], b.add...) +} diff --git a/model/labels/labels_string.go b/model/labels/labels_string.go index d830d5a43..f293c9167 100644 --- a/model/labels/labels_string.go +++ b/model/labels/labels_string.go @@ -811,7 +811,7 @@ func (b *ScratchBuilder) Labels() Labels { } // Write the newly-built Labels out to ls, reusing an internal buffer. -// Callers must ensure that there are no other references to ls. +// Callers must ensure that there are no other references to ls, or any strings fetched from it. 
func (b *ScratchBuilder) Overwrite(ls *Labels) { size := labelsSize(b.add) if size <= cap(b.overwriteBuffer) { From c0879d64cf66d7902b838fd73376c30b344055c1 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Fri, 28 Oct 2022 16:58:40 +0200 Subject: [PATCH 068/632] promql: Separate `Point` into `FPoint` and `HPoint` MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In other words: Instead of having a “polymorphous” `Point` that can either contain a float value or a histogram value, use an `FPoint` for floats and an `HPoint` for histograms. This seemingly small change has a _lot_ of repercussions throughout the codebase. The idea here is to avoid the increase in size of `Point` arrays that happened after native histograms had been added. The higher-level data structures (`Sample`, `Series`, etc.) are still “polymorphous”. The same idea could be applied to them, but at each step the trade-offs needed to be evaluated. The idea with this change is to do the minimum necessary to get back to pre-histogram performance for functions that do not touch histograms. Here are comparisons for the `changes` function. The test data doesn't include histograms yet. Ideally, there would be no change in the benchmark result at all. 
First runtime v2.39 compared to directly prior to this commit: ``` name old time/op new time/op delta RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 542µs ± 1% +38.58% (p=0.000 n=9+8) RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 617µs ± 2% +36.48% (p=0.000 n=10+10) RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.36ms ± 2% +21.58% (p=0.000 n=8+10) RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 8.94ms ± 1% +14.21% (p=0.000 n=10+10) RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.30ms ± 1% +10.67% (p=0.000 n=9+10) RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.10ms ± 1% +11.82% (p=0.000 n=10+10) RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 11.8ms ± 1% +12.50% (p=0.000 n=8+10) RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 87.4ms ± 1% +12.63% (p=0.000 n=9+9) RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 32.8ms ± 1% +8.01% (p=0.000 n=10+10) RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.6ms ± 2% +9.64% (p=0.000 n=10+10) RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 117ms ± 1% +11.69% (p=0.000 n=10+10) RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 876ms ± 1% +11.83% (p=0.000 n=9+10) ``` And then runtime v2.39 compared to after this commit: ``` name old time/op new time/op delta RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 547µs ± 1% +39.84% (p=0.000 n=9+8) RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 616µs ± 2% +36.15% (p=0.000 n=10+10) RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.26ms ± 1% +12.20% (p=0.000 n=8+10) RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 7.95ms ± 1% +1.59% (p=0.000 n=10+8) RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.38ms ± 2% +13.49% (p=0.000 n=9+10) RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.02ms ± 1% +9.80% (p=0.000 n=10+9) RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 
10.8ms ± 1% +3.08% (p=0.000 n=8+10) RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 78.1ms ± 1% +0.58% (p=0.035 n=9+10) RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 33.5ms ± 4% +10.18% (p=0.000 n=10+10) RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.0ms ± 1% +7.98% (p=0.000 n=10+10) RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 107ms ± 1% +1.92% (p=0.000 n=10+10) RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 775ms ± 1% -1.02% (p=0.019 n=9+9) ``` In summary, the runtime doesn't really improve with this change for queries with just a few steps. For queries with many steps, this commit essentially reinstates the old performance. This is good because the many-step queries are the one that matter most (longest absolute runtime). In terms of allocations, though, this commit doesn't make a dent at all (numbers not shown). The reason is that most of the allocations happen in the sampleRingIterator (in the storage package), which has to be addressed in a separate commit. 
Signed-off-by: beorn7 --- cmd/promtool/unittest.go | 5 +- docs/querying/functions.md | 35 ++- promql/engine.go | 392 +++++++++++++++++++----------- promql/engine_test.go | 141 ++++++----- promql/functions.go | 475 ++++++++++++++++++++++--------------- promql/functions_test.go | 2 +- promql/quantile.go | 2 +- promql/test.go | 26 +- promql/test_test.go | 22 +- promql/value.go | 243 +++++++++++-------- rules/alerting.go | 10 +- rules/alerting_test.go | 46 ++-- rules/manager.go | 5 +- rules/manager_test.go | 40 ++-- rules/recording_test.go | 24 +- template/template.go | 2 +- template/template_test.go | 26 +- util/jsonutil/marshal.go | 79 +++++- web/api/v1/api.go | 163 ++++--------- web/api/v1/api_test.go | 48 ++-- web/federate.go | 6 +- web/federate_test.go | 29 ++- 22 files changed, 1062 insertions(+), 759 deletions(-) diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index cc40ac9d0..b3e6f67f9 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -347,7 +347,7 @@ Outer: for _, s := range got { gotSamples = append(gotSamples, parsedSample{ Labels: s.Metric.Copy(), - Value: s.V, + Value: s.F, }) } @@ -447,7 +447,8 @@ func query(ctx context.Context, qs string, t time.Time, engine *promql.Engine, q return v, nil case promql.Scalar: return promql.Vector{promql.Sample{ - Point: promql.Point{T: v.T, V: v.V}, + T: v.T, + F: v.V, Metric: labels.Labels{}, }}, nil default: diff --git a/docs/querying/functions.md b/docs/querying/functions.md index b4bc0a743..e92a58937 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -17,9 +17,7 @@ _Notes about the experimental native histograms:_ flag](../feature_flags/#native-histograms). As long as no native histograms have been ingested into the TSDB, all functions will behave as usual. * Functions that do not explicitly mention native histograms in their - documentation (see below) effectively treat a native histogram as a float - sample of value 0. 
(This is confusing and will change before native - histograms become a stable feature.) + documentation (see below) will ignore histogram samples. * Functions that do already act on native histograms might still change their behavior in the future. * If a function requires the same bucket layout between multiple native @@ -404,6 +402,8 @@ For each timeseries in `v`, `label_join(v instant-vector, dst_label string, sepa using `separator` and returns the timeseries with the label `dst_label` containing the joined value. There can be any number of `src_labels` in this function. +`label_join` acts on float and histogram samples in the same way. + This example will return a vector with each time series having a `foo` label with the value `a,b,c` added to it: ``` @@ -419,6 +419,8 @@ of `replacement`, together with the original labels in the input. Capturing grou regular expression can be referenced with `$1`, `$2`, etc. If the regular expression doesn't match then the timeseries is returned unchanged. +`label_replace` acts on float and histogram samples in the same way. + This example will return timeseries with the values `a:c` at label `service` and `a` at label `foo`: ``` @@ -501,10 +503,21 @@ counter resets when your target restarts. For each input time series, `resets(v range-vector)` returns the number of counter resets within the provided time range as an instant vector. Any -decrease in the value between two consecutive samples is interpreted as a -counter reset. +decrease in the value between two consecutive float samples is interpreted as a +counter reset. A reset in a native histogram is detected in a more complex way: +Any decrease in any bucket, including the zero bucket, or in the count of +observation constitutes a counter reset, but also the disappearance of any +previously populated bucket, an increase in bucket resolution, or a decrease of +the zero-bucket width. -`resets` should only be used with counters. 
+`resets` should only be used with counters and counter-like native +histograms. + +If the range vector contains a mix of float and histogram samples for the same +series, counter resets are detected separately and their numbers added up. The +change from a float to a histogram sample is _not_ considered a counter +reset. Each float sample is compared to the next float sample, and each +histogram is comprared to the next histogram. ## `round()` @@ -526,7 +539,7 @@ have exactly one element, `scalar` will return `NaN`. ## `sort()` `sort(v instant-vector)` returns vector elements sorted by their sample values, -in ascending order. +in ascending order. Native histograms are sorted by their sum of observations. ## `sort_desc()` @@ -545,7 +558,8 @@ expression is to be evaluated. ## `timestamp()` `timestamp(v instant-vector)` returns the timestamp of each of the samples of -the given vector as the number of seconds since January 1, 1970 UTC. +the given vector as the number of seconds since January 1, 1970 UTC. It also +works with histogram samples. ## `vector()` @@ -569,12 +583,15 @@ over time and return an instant vector with per-series aggregation results: * `quantile_over_time(scalar, range-vector)`: the φ-quantile (0 ≤ φ ≤ 1) of the values in the specified interval. * `stddev_over_time(range-vector)`: the population standard deviation of the values in the specified interval. * `stdvar_over_time(range-vector)`: the population standard variance of the values in the specified interval. -* `last_over_time(range-vector)`: the most recent point value in specified interval. +* `last_over_time(range-vector)`: the most recent point value in the specified interval. * `present_over_time(range-vector)`: the value 1 for any series in the specified interval. Note that all values in the specified interval have the same weight in the aggregation even if the values are not equally spaced throughout the interval. 
+`count_over_time`, `last_over_time`, and `present_over_time` handle native +histograms as expected. All other functions ignore histogram samples. + ## Trigonometric Functions The trigonometric functions work in radians: diff --git a/promql/engine.go b/promql/engine.go index 52278c7a2..4c6751088 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -189,7 +189,8 @@ func (q *query) Cancel() { // Close implements the Query interface. func (q *query) Close() { for _, s := range q.matrix { - putPointSlice(s.Points) + putFPointSlice(s.Floats) + putHPointSlice(s.Histograms) } } @@ -680,11 +681,15 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval for i, s := range mat { // Point might have a different timestamp, force it to the evaluation // timestamp as that is when we ran the evaluation. - vector[i] = Sample{Metric: s.Metric, Point: Point{V: s.Points[0].V, H: s.Points[0].H, T: start}} + if len(s.Histograms) > 0 { + vector[i] = Sample{Metric: s.Metric, H: s.Histograms[0].H, T: start} + } else { + vector[i] = Sample{Metric: s.Metric, F: s.Floats[0].F, T: start} + } } return vector, warnings, nil case parser.ValueTypeScalar: - return Scalar{V: mat[0].Points[0].V, T: start}, warnings, nil + return Scalar{V: mat[0].Floats[0].F, T: start}, warnings, nil case parser.ValueTypeMatrix: return mat, warnings, nil default: @@ -940,9 +945,10 @@ type errWithWarnings struct { func (e errWithWarnings) Error() string { return e.err.Error() } -// An evaluator evaluates given expressions over given fixed timestamps. It -// is attached to an engine through which it connects to a querier and reports -// errors. On timeout or cancellation of its context it terminates. +// An evaluator evaluates the given expressions over the given fixed +// timestamps. It is attached to an engine through which it connects to a +// querier and reports errors. On timeout or cancellation of its context it +// terminates. 
type evaluator struct { ctx context.Context @@ -1137,17 +1143,35 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) } for si, series := range matrixes[i] { - for _, point := range series.Points { + for _, point := range series.Floats { if point.T == ts { if ev.currentSamples < ev.maxSamples { - vectors[i] = append(vectors[i], Sample{Metric: series.Metric, Point: point}) + vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: point.F, T: ts}) if prepSeries != nil { bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si]) } // Move input vectors forward so we don't have to re-scan the same // past points at the next step. - matrixes[i][si].Points = series.Points[1:] + matrixes[i][si].Floats = series.Floats[1:] + ev.currentSamples++ + } else { + ev.error(ErrTooManySamples(env)) + } + } + break + } + for _, point := range series.Histograms { + if point.T == ts { + if ev.currentSamples < ev.maxSamples { + vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: point.H, T: ts}) + if prepSeries != nil { + bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si]) + } + + // Move input vectors forward so we don't have to re-scan the same + // past points at the next step. 
+ matrixes[i][si].Histograms = series.Histograms[1:] ev.currentSamples++ } else { ev.error(ErrTooManySamples(env)) @@ -1184,8 +1208,11 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) if ev.endTimestamp == ev.startTimestamp { mat := make(Matrix, len(result)) for i, s := range result { - s.Point.T = ts - mat[i] = Series{Metric: s.Metric, Points: []Point{s.Point}} + if s.H == nil { + mat[i] = Series{Metric: s.Metric, Floats: []FPoint{{T: ts, F: s.F}}} + } else { + mat[i] = Series{Metric: s.Metric, Histograms: []HPoint{{T: ts, H: s.H}}} + } } ev.currentSamples = originalNumSamples + mat.TotalSamples() ev.samplesStats.UpdatePeak(ev.currentSamples) @@ -1197,22 +1224,28 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) h := sample.Metric.Hash() ss, ok := seriess[h] if !ok { - ss = Series{ - Metric: sample.Metric, - Points: getPointSlice(numSteps), - } + ss = Series{Metric: sample.Metric} + } + if sample.H == nil { + if ss.Floats == nil { + ss.Floats = getFPointSlice(numSteps) + } + ss.Floats = append(ss.Floats, FPoint{T: ts, F: sample.F}) + } else { + if ss.Histograms == nil { + ss.Histograms = getHPointSlice(numSteps) + } + ss.Histograms = append(ss.Histograms, HPoint{T: ts, H: sample.H}) } - sample.Point.T = ts - ss.Points = append(ss.Points, sample.Point) seriess[h] = ss - } } // Reuse the original point slices. for _, m := range origMatrixes { for _, s := range m { - putPointSlice(s.Points) + putFPointSlice(s.Floats) + putHPointSlice(s.Histograms) } } // Assemble the output matrix. By the time we get here we know we don't have too many samples. 
@@ -1253,7 +1286,7 @@ func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSele } totalSamples := 0 for _, s := range mat { - totalSamples += len(s.Points) + totalSamples += len(s.Floats) + len(s.Histograms) vs.Series = append(vs.Series, NewStorageSeries(s)) } return ms, totalSamples, ws @@ -1297,7 +1330,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { var param float64 if e.Param != nil { - param = v[0].(Vector)[0].V + param = v[0].(Vector)[0].F } return ev.aggregation(e.Op, sortedGrouping, e.Without, param, v[1].(Vector), sh[1], enh), nil }, e.Param, e.Expr) @@ -1396,7 +1429,8 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { stepRange = ev.interval } // Reuse objects across steps to save memory allocations. - points := getPointSlice(16) + var floats []FPoint + var histograms []HPoint inMatrix := make(Matrix, 1) inArgs[matrixArgIndex] = inMatrix enh := &EvalNodeHelper{Out: make(Vector, 0, 1)} @@ -1404,8 +1438,13 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { it := storage.NewBuffer(selRange) var chkIter chunkenc.Iterator for i, s := range selVS.Series { - ev.currentSamples -= len(points) - points = points[:0] + ev.currentSamples -= len(floats) + len(histograms) + if floats != nil { + floats = floats[:0] + } + if histograms != nil { + histograms = histograms[:0] + } chkIter = s.Iterator(chkIter) it.Reset(chkIter) metric := selVS.Series[i].Labels() @@ -1418,7 +1457,6 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } ss := Series{ Metric: metric, - Points: getPointSlice(numSteps), } inMatrix[0].Metric = selVS.Series[i].Labels() for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { @@ -1428,44 +1466,54 @@ func (ev *evaluator) eval(expr parser.Expr) 
(parser.Value, storage.Warnings) { // when looking up the argument, as there will be no gaps. for j := range e.Args { if j != matrixArgIndex { - otherInArgs[j][0].V = otherArgs[j][0].Points[step].V + otherInArgs[j][0].F = otherArgs[j][0].Floats[step].F } } maxt := ts - offset mint := maxt - selRange // Evaluate the matrix selector for this series for this step. - points = ev.matrixIterSlice(it, mint, maxt, points) - if len(points) == 0 { + floats, histograms = ev.matrixIterSlice(it, mint, maxt, floats, histograms) + if len(floats)+len(histograms) == 0 { continue } - inMatrix[0].Points = points + inMatrix[0].Floats = floats + inMatrix[0].Histograms = histograms enh.Ts = ts // Make the function call. outVec := call(inArgs, e.Args, enh) - ev.samplesStats.IncrementSamplesAtStep(step, int64(len(points))) + ev.samplesStats.IncrementSamplesAtStep(step, int64(len(floats)+len(histograms))) enh.Out = outVec[:0] if len(outVec) > 0 { - ss.Points = append(ss.Points, Point{V: outVec[0].Point.V, H: outVec[0].Point.H, T: ts}) + if outVec[0].H == nil { + if ss.Floats == nil { + ss.Floats = getFPointSlice(numSteps) + } + ss.Floats = append(ss.Floats, FPoint{F: outVec[0].F, T: ts}) + } else { + if ss.Histograms == nil { + ss.Histograms = getHPointSlice(numSteps) + } + ss.Histograms = append(ss.Histograms, HPoint{H: outVec[0].H, T: ts}) + } } // Only buffer stepRange milliseconds from the second step on. 
it.ReduceDelta(stepRange) } - if len(ss.Points) > 0 { - if ev.currentSamples+len(ss.Points) <= ev.maxSamples { + if len(ss.Floats)+len(ss.Histograms) > 0 { + if ev.currentSamples+len(ss.Floats)+len(ss.Histograms) <= ev.maxSamples { mat = append(mat, ss) - ev.currentSamples += len(ss.Points) + ev.currentSamples += len(ss.Floats) + len(ss.Histograms) } else { ev.error(ErrTooManySamples(env)) } - } else { - putPointSlice(ss.Points) } ev.samplesStats.UpdatePeak(ev.currentSamples) } ev.samplesStats.UpdatePeak(ev.currentSamples) - ev.currentSamples -= len(points) - putPointSlice(points) + ev.currentSamples -= len(floats) + len(histograms) + putFPointSlice(floats) + putHPointSlice(histograms) // The absent_over_time function returns 0 or 1 series. So far, the matrix // contains multiple series. The following code will create a new series @@ -1474,7 +1522,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { steps := int(1 + (ev.endTimestamp-ev.startTimestamp)/ev.interval) // Iterate once to look for a complete series. 
for _, s := range mat { - if len(s.Points) == steps { + if len(s.Floats)+len(s.Histograms) == steps { return Matrix{}, warnings } } @@ -1482,7 +1530,10 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { found := map[int64]struct{}{} for i, s := range mat { - for _, p := range s.Points { + for _, p := range s.Floats { + found[p.T] = struct{}{} + } + for _, p := range s.Histograms { found[p.T] = struct{}{} } if i > 0 && len(found) == steps { @@ -1490,17 +1541,17 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } } - newp := make([]Point, 0, steps-len(found)) + newp := make([]FPoint, 0, steps-len(found)) for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { if _, ok := found[ts]; !ok { - newp = append(newp, Point{T: ts, V: 1}) + newp = append(newp, FPoint{T: ts, F: 1}) } } return Matrix{ Series{ Metric: createLabelsForAbsentFunction(e.Args[0]), - Points: newp, + Floats: newp, }, }, warnings } @@ -1520,8 +1571,8 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { if e.Op == parser.SUB { for i := range mat { mat[i].Metric = dropMetricName(mat[i].Metric) - for j := range mat[i].Points { - mat[i].Points[j].V = -mat[i].Points[j].V + for j := range mat[i].Floats { + mat[i].Floats[j].F = -mat[i].Floats[j].F } } if mat.ContainsSameLabelset() { @@ -1534,8 +1585,8 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { switch lt, rt := e.LHS.Type(), e.RHS.Type(); { case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar: return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - val := scalarBinop(e.Op, v[0].(Vector)[0].Point.V, v[1].(Vector)[0].Point.V) - return append(enh.Out, Sample{Point: Point{V: val}}), nil + val := scalarBinop(e.Op, v[0].(Vector)[0].F, v[1].(Vector)[0].F) + return append(enh.Out, Sample{F: val}), nil }, e.LHS, e.RHS) case lt == 
parser.ValueTypeVector && rt == parser.ValueTypeVector: // Function to compute the join signature for each series. @@ -1565,18 +1616,18 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].Point.V}, false, e.ReturnBool, enh), nil + return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh), nil }, e.LHS, e.RHS) case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].Point.V}, true, e.ReturnBool, enh), nil + return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh), nil }, e.LHS, e.RHS) } case *parser.NumberLiteral: return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return append(enh.Out, Sample{Point: Point{V: e.Val}, Metric: labels.EmptyLabels()}), nil + return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil }) case *parser.StringLiteral: @@ -1595,15 +1646,24 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { it.Reset(chkIter) ss := Series{ Metric: e.Series[i].Labels(), - Points: getPointSlice(numSteps), } for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { step++ - _, v, h, ok := ev.vectorSelectorSingle(it, e, ts) + _, f, h, ok := ev.vectorSelectorSingle(it, e, ts) if ok { if ev.currentSamples < ev.maxSamples { - ss.Points = append(ss.Points, Point{V: v, H: h, T: ts}) + if h == nil { + if ss.Floats == nil { + ss.Floats = 
getFPointSlice(numSteps) + } + ss.Floats = append(ss.Floats, FPoint{F: f, T: ts}) + } else { + if ss.Histograms == nil { + ss.Histograms = getHPointSlice(numSteps) + } + ss.Histograms = append(ss.Histograms, HPoint{H: h, T: ts}) + } ev.samplesStats.IncrementSamplesAtStep(step, 1) ev.currentSamples++ } else { @@ -1612,10 +1672,8 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } } - if len(ss.Points) > 0 { + if len(ss.Floats)+len(ss.Histograms) > 0 { mat = append(mat, ss) - } else { - putPointSlice(ss.Points) } } ev.samplesStats.UpdatePeak(ev.currentSamples) @@ -1706,15 +1764,21 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { panic(fmt.Errorf("unexpected result in StepInvariantExpr evaluation: %T", expr)) } for i := range mat { - if len(mat[i].Points) != 1 { + if len(mat[i].Floats)+len(mat[i].Histograms) != 1 { panic(fmt.Errorf("unexpected number of samples")) } for ts := ev.startTimestamp + ev.interval; ts <= ev.endTimestamp; ts = ts + ev.interval { - mat[i].Points = append(mat[i].Points, Point{ - T: ts, - V: mat[i].Points[0].V, - H: mat[i].Points[0].H, - }) + if len(mat[i].Floats) > 0 { + mat[i].Floats = append(mat[i].Floats, FPoint{ + T: ts, + F: mat[i].Floats[0].F, + }) + } else { + mat[i].Histograms = append(mat[i].Histograms, HPoint{ + T: ts, + H: mat[i].Histograms[0].H, + }) + } ev.currentSamples++ if ev.currentSamples > ev.maxSamples { ev.error(ErrTooManySamples(env)) @@ -1741,11 +1805,13 @@ func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vect chkIter = s.Iterator(chkIter) it.Reset(chkIter) - t, v, h, ok := ev.vectorSelectorSingle(it, node, ts) + t, f, h, ok := ev.vectorSelectorSingle(it, node, ts) if ok { vec = append(vec, Sample{ Metric: node.Series[i].Labels(), - Point: Point{V: v, H: h, T: t}, + T: t, + F: f, + H: h, }) ev.currentSamples++ @@ -1795,17 +1861,31 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, no return t, v, h, 
true } -var pointPool zeropool.Pool[[]Point] +var ( + fPointPool zeropool.Pool[[]FPoint] + hPointPool zeropool.Pool[[]HPoint] +) -func getPointSlice(sz int) []Point { - if p := pointPool.Get(); p != nil { +func getFPointSlice(sz int) []FPoint { + if p := fPointPool.Get(); p != nil { return p } - return make([]Point, 0, sz) + return make([]FPoint, 0, sz) } -func putPointSlice(p []Point) { - pointPool.Put(p[:0]) +func putFPointSlice(p []FPoint) { + fPointPool.Put(p[:0]) +} + +func getHPointSlice(sz int) []HPoint { + if p := hPointPool.Get(); p != nil { + return p + } + return make([]HPoint, 0, sz) +} + +func putHPointSlice(p []HPoint) { + hPointPool.Put(p[:0]) } // matrixSelector evaluates a *parser.MatrixSelector expression. @@ -1837,13 +1917,15 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storag Metric: series[i].Labels(), } - ss.Points = ev.matrixIterSlice(it, mint, maxt, getPointSlice(16)) - ev.samplesStats.IncrementSamplesAtTimestamp(ev.startTimestamp, int64(len(ss.Points))) + ss.Floats, ss.Histograms = ev.matrixIterSlice(it, mint, maxt, nil, nil) + totalLen := int64(len(ss.Floats)) + int64(len(ss.Histograms)) + ev.samplesStats.IncrementSamplesAtTimestamp(ev.startTimestamp, totalLen) - if len(ss.Points) > 0 { + if totalLen > 0 { matrix = append(matrix, ss) } else { - putPointSlice(ss.Points) + putFPointSlice(ss.Floats) + putHPointSlice(ss.Histograms) } } return matrix, ws @@ -1857,24 +1939,54 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storag // values). Any such points falling before mint are discarded; points that fall // into the [mint, maxt] range are retained; only points with later timestamps // are populated from the iterator. 
-func (ev *evaluator) matrixIterSlice(it *storage.BufferedSeriesIterator, mint, maxt int64, out []Point) []Point { - if len(out) > 0 && out[len(out)-1].T >= mint { +func (ev *evaluator) matrixIterSlice( + it *storage.BufferedSeriesIterator, mint, maxt int64, + floats []FPoint, histograms []HPoint, +) ([]FPoint, []HPoint) { + mintFloats, mintHistograms := mint, mint + + // First floats... + if len(floats) > 0 && floats[len(floats)-1].T >= mint { // There is an overlap between previous and current ranges, retain common // points. In most such cases: // (a) the overlap is significantly larger than the eval step; and/or // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; out[drop].T < mint; drop++ { + for drop = 0; floats[drop].T < mint; drop++ { } ev.currentSamples -= drop - copy(out, out[drop:]) - out = out[:len(out)-drop] + copy(floats, floats[drop:]) + floats = floats[:len(floats)-drop] // Only append points with timestamps after the last timestamp we have. - mint = out[len(out)-1].T + 1 + mintFloats = floats[len(floats)-1].T + 1 } else { - ev.currentSamples -= len(out) - out = out[:0] + ev.currentSamples -= len(floats) + if floats != nil { + floats = floats[:0] + } + } + + // ...then the same for histograms. TODO(beorn7): Use generics? + if len(histograms) > 0 && histograms[len(histograms)-1].T >= mint { + // There is an overlap between previous and current ranges, retain common + // points. In most such cases: + // (a) the overlap is significantly larger than the eval step; and/or + // (b) the number of samples is relatively small. + // so a linear search will be as fast as a binary search. + var drop int + for drop = 0; histograms[drop].T < mint; drop++ { + } + ev.currentSamples -= drop + copy(histograms, histograms[drop:]) + histograms = histograms[:len(histograms)-drop] + // Only append points with timestamps after the last timestamp we have. 
+ mintHistograms = histograms[len(histograms)-1].T + 1 + } else { + ev.currentSamples -= len(histograms) + if histograms != nil { + histograms = histograms[:0] + } } soughtValueType := it.Seek(maxt) @@ -1896,25 +2008,31 @@ loop: continue loop } // Values in the buffer are guaranteed to be smaller than maxt. - if t >= mint { + if t >= mintHistograms { if ev.currentSamples >= ev.maxSamples { ev.error(ErrTooManySamples(env)) } ev.currentSamples++ - out = append(out, Point{T: t, H: h}) + if histograms == nil { + histograms = getHPointSlice(16) + } + histograms = append(histograms, HPoint{T: t, H: h}) } case chunkenc.ValFloat: - t, v := buf.At() - if value.IsStaleNaN(v) { + t, f := buf.At() + if value.IsStaleNaN(f) { continue loop } // Values in the buffer are guaranteed to be smaller than maxt. - if t >= mint { + if t >= mintFloats { if ev.currentSamples >= ev.maxSamples { ev.error(ErrTooManySamples(env)) } ev.currentSamples++ - out = append(out, Point{T: t, V: v}) + if floats == nil { + floats = getFPointSlice(16) + } + floats = append(floats, FPoint{T: t, F: f}) } } } @@ -1926,21 +2044,27 @@ loop: if ev.currentSamples >= ev.maxSamples { ev.error(ErrTooManySamples(env)) } - out = append(out, Point{T: t, H: h}) + if histograms == nil { + histograms = getHPointSlice(16) + } + histograms = append(histograms, HPoint{T: t, H: h}) ev.currentSamples++ } case chunkenc.ValFloat: - t, v := it.At() - if t == maxt && !value.IsStaleNaN(v) { + t, f := it.At() + if t == maxt && !value.IsStaleNaN(f) { if ev.currentSamples >= ev.maxSamples { ev.error(ErrTooManySamples(env)) } - out = append(out, Point{T: t, V: v}) + if floats == nil { + floats = getFPointSlice(16) + } + floats = append(floats, FPoint{T: t, F: f}) ev.currentSamples++ } } ev.samplesStats.UpdatePeak(ev.currentSamples) - return out + return floats, histograms } func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector { @@ -2086,18 +2210,18 
@@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * } // Account for potentially swapped sidedness. - vl, vr := ls.V, rs.V + fl, fr := ls.F, rs.F hl, hr := ls.H, rs.H if matching.Card == parser.CardOneToMany { - vl, vr = vr, vl + fl, fr = fr, fl hl, hr = hr, hl } - value, histogramValue, keep := vectorElemBinop(op, vl, vr, hl, hr) + floatValue, histogramValue, keep := vectorElemBinop(op, fl, fr, hl, hr) if returnBool { if keep { - value = 1.0 + floatValue = 1.0 } else { - value = 0.0 + floatValue = 0.0 } } else if !keep { continue @@ -2131,7 +2255,8 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * // Both lhs and rhs are of same type. enh.Out = append(enh.Out, Sample{ Metric: metric, - Point: Point{V: value, H: histogramValue}, + F: floatValue, + H: histogramValue, }) } } @@ -2200,7 +2325,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V // VectorscalarBinop evaluates a binary operation between a Vector and a Scalar. func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) Vector { for _, lhsSample := range lhs { - lv, rv := lhsSample.V, rhs.V + lv, rv := lhsSample.F, rhs.V // lhs always contains the Vector. If the original position was different // swap for calculating the value. if swap { @@ -2221,7 +2346,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala keep = true } if keep { - lhsSample.V = value + lhsSample.F = value if shouldDropMetricName(op) || returnBool { lhsSample.Metric = enh.DropMetricName(lhsSample.Metric) } @@ -2313,7 +2438,7 @@ type groupedAggregation struct { hasFloat bool // Has at least 1 float64 sample aggregated. hasHistogram bool // Has at least 1 histogram sample aggregated. 
labels labels.Labels - value float64 + floatValue float64 histogramValue *histogram.FloatHistogram mean float64 groupCount int @@ -2365,7 +2490,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without if op == parser.COUNT_VALUES { enh.resetBuilder(metric) - enh.lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64)) + enh.lb.Set(valueLabel, strconv.FormatFloat(s.F, 'f', -1, 64)) metric = enh.lb.Labels() // We've changed the metric so we have to recompute the grouping key. @@ -2397,8 +2522,8 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without } newAgg := &groupedAggregation{ labels: m, - value: s.V, - mean: s.V, + floatValue: s.F, + mean: s.F, groupCount: 1, } if s.H == nil { @@ -2420,21 +2545,21 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without } switch op { case parser.STDVAR, parser.STDDEV: - result[groupingKey].value = 0 + result[groupingKey].floatValue = 0 case parser.TOPK, parser.QUANTILE: result[groupingKey].heap = make(vectorByValueHeap, 1, resultSize) result[groupingKey].heap[0] = Sample{ - Point: Point{V: s.V}, + F: s.F, Metric: s.Metric, } case parser.BOTTOMK: result[groupingKey].reverseHeap = make(vectorByReverseValueHeap, 1, resultSize) result[groupingKey].reverseHeap[0] = Sample{ - Point: Point{V: s.V}, + F: s.F, Metric: s.Metric, } case parser.GROUP: - result[groupingKey].value = 1 + result[groupingKey].floatValue = 1 } continue } @@ -2459,19 +2584,19 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without // point in copying the histogram in that case. } else { group.hasFloat = true - group.value += s.V + group.floatValue += s.F } case parser.AVG: group.groupCount++ if math.IsInf(group.mean, 0) { - if math.IsInf(s.V, 0) && (group.mean > 0) == (s.V > 0) { + if math.IsInf(s.F, 0) && (group.mean > 0) == (s.F > 0) { // The `mean` and `s.V` values are `Inf` of the same sign. 
They // can't be subtracted, but the value of `mean` is correct // already. break } - if !math.IsInf(s.V, 0) && !math.IsNaN(s.V) { + if !math.IsInf(s.F, 0) && !math.IsNaN(s.F) { // At this stage, the mean is an infinite. If the added // value is neither an Inf or a Nan, we can keep that mean // value. @@ -2482,19 +2607,19 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without } } // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. - group.mean += s.V/float64(group.groupCount) - group.mean/float64(group.groupCount) + group.mean += s.F/float64(group.groupCount) - group.mean/float64(group.groupCount) case parser.GROUP: // Do nothing. Required to avoid the panic in `default:` below. case parser.MAX: - if group.value < s.V || math.IsNaN(group.value) { - group.value = s.V + if group.floatValue < s.F || math.IsNaN(group.floatValue) { + group.floatValue = s.F } case parser.MIN: - if group.value > s.V || math.IsNaN(group.value) { - group.value = s.V + if group.floatValue > s.F || math.IsNaN(group.floatValue) { + group.floatValue = s.F } case parser.COUNT, parser.COUNT_VALUES: @@ -2502,21 +2627,21 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without case parser.STDVAR, parser.STDDEV: group.groupCount++ - delta := s.V - group.mean + delta := s.F - group.mean group.mean += delta / float64(group.groupCount) - group.value += delta * (s.V - group.mean) + group.floatValue += delta * (s.F - group.mean) case parser.TOPK: // We build a heap of up to k elements, with the smallest element at heap[0]. if int64(len(group.heap)) < k { heap.Push(&group.heap, &Sample{ - Point: Point{V: s.V}, + F: s.F, Metric: s.Metric, }) - } else if group.heap[0].V < s.V || (math.IsNaN(group.heap[0].V) && !math.IsNaN(s.V)) { + } else if group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)) { // This new element is bigger than the previous smallest element - overwrite that. 
group.heap[0] = Sample{ - Point: Point{V: s.V}, + F: s.F, Metric: s.Metric, } if k > 1 { @@ -2528,13 +2653,13 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without // We build a heap of up to k elements, with the biggest element at heap[0]. if int64(len(group.reverseHeap)) < k { heap.Push(&group.reverseHeap, &Sample{ - Point: Point{V: s.V}, + F: s.F, Metric: s.Metric, }) - } else if group.reverseHeap[0].V > s.V || (math.IsNaN(group.reverseHeap[0].V) && !math.IsNaN(s.V)) { + } else if group.reverseHeap[0].F > s.F || (math.IsNaN(group.reverseHeap[0].F) && !math.IsNaN(s.F)) { // This new element is smaller than the previous biggest element - overwrite that. group.reverseHeap[0] = Sample{ - Point: Point{V: s.V}, + F: s.F, Metric: s.Metric, } if k > 1 { @@ -2554,16 +2679,16 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without for _, aggr := range orderedResult { switch op { case parser.AVG: - aggr.value = aggr.mean + aggr.floatValue = aggr.mean case parser.COUNT, parser.COUNT_VALUES: - aggr.value = float64(aggr.groupCount) + aggr.floatValue = float64(aggr.groupCount) case parser.STDVAR: - aggr.value = aggr.value / float64(aggr.groupCount) + aggr.floatValue = aggr.floatValue / float64(aggr.groupCount) case parser.STDDEV: - aggr.value = math.Sqrt(aggr.value / float64(aggr.groupCount)) + aggr.floatValue = math.Sqrt(aggr.floatValue / float64(aggr.groupCount)) case parser.TOPK: // The heap keeps the lowest value on top, so reverse it. @@ -2573,7 +2698,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without for _, v := range aggr.heap { enh.Out = append(enh.Out, Sample{ Metric: v.Metric, - Point: Point{V: v.V}, + F: v.F, }) } continue // Bypass default append. 
@@ -2586,13 +2711,13 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without for _, v := range aggr.reverseHeap { enh.Out = append(enh.Out, Sample{ Metric: v.Metric, - Point: Point{V: v.V}, + F: v.F, }) } continue // Bypass default append. case parser.QUANTILE: - aggr.value = quantile(q, aggr.heap) + aggr.floatValue = quantile(q, aggr.heap) case parser.SUM: if aggr.hasFloat && aggr.hasHistogram { @@ -2605,7 +2730,8 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without enh.Out = append(enh.Out, Sample{ Metric: aggr.labels, - Point: Point{V: aggr.value, H: aggr.histogramValue}, + F: aggr.floatValue, + H: aggr.histogramValue, }) } return enh.Out diff --git a/promql/engine_test.go b/promql/engine_test.go index d1c4570ea..b64e32ba4 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -662,7 +662,8 @@ load 10s Query: "metric", Result: Vector{ Sample{ - Point: Point{V: 1, T: 1000}, + F: 1, + T: 1000, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -672,7 +673,7 @@ load 10s Query: "metric[20s]", Result: Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 2, T: 10000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -683,7 +684,7 @@ load 10s Query: "1", Result: Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 1000}, {V: 1, T: 2000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 1000}, {F: 1, T: 2000}}, Metric: labels.EmptyLabels(), }, }, @@ -695,7 +696,7 @@ load 10s Query: "metric", Result: Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 1000}, {V: 1, T: 2000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 1000}, {F: 1, T: 2000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -707,7 +708,7 @@ load 10s Query: "metric", Result: Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, Metric: 
labels.FromStrings("__name__", "metric"), }, }, @@ -1462,20 +1463,20 @@ load 1ms query: `metric_neg @ 0`, start: 100, result: Vector{ - Sample{Point: Point{V: 1, T: 100000}, Metric: lblsneg}, + Sample{F: 1, T: 100000, Metric: lblsneg}, }, }, { query: `metric_neg @ -200`, start: 100, result: Vector{ - Sample{Point: Point{V: 201, T: 100000}, Metric: lblsneg}, + Sample{F: 201, T: 100000, Metric: lblsneg}, }, }, { query: `metric{job="2"} @ 50`, start: -2, end: 2, interval: 1, result: Matrix{ Series{ - Points: []Point{{V: 10, T: -2000}, {V: 10, T: -1000}, {V: 10, T: 0}, {V: 10, T: 1000}, {V: 10, T: 2000}}, + Floats: []FPoint{{F: 10, T: -2000}, {F: 10, T: -1000}, {F: 10, T: 0}, {F: 10, T: 1000}, {F: 10, T: 2000}}, Metric: lbls2, }, }, @@ -1484,11 +1485,11 @@ load 1ms start: 10, result: Matrix{ Series{ - Points: []Point{{V: 28, T: 280000}, {V: 29, T: 290000}, {V: 30, T: 300000}}, + Floats: []FPoint{{F: 28, T: 280000}, {F: 29, T: 290000}, {F: 30, T: 300000}}, Metric: lbls1, }, Series{ - Points: []Point{{V: 56, T: 280000}, {V: 58, T: 290000}, {V: 60, T: 300000}}, + Floats: []FPoint{{F: 56, T: 280000}, {F: 58, T: 290000}, {F: 60, T: 300000}}, Metric: lbls2, }, }, @@ -1497,7 +1498,7 @@ load 1ms start: 100, result: Matrix{ Series{ - Points: []Point{{V: 3, T: -2000}, {V: 2, T: -1000}, {V: 1, T: 0}}, + Floats: []FPoint{{F: 3, T: -2000}, {F: 2, T: -1000}, {F: 1, T: 0}}, Metric: lblsneg, }, }, @@ -1506,7 +1507,7 @@ load 1ms start: 100, result: Matrix{ Series{ - Points: []Point{{V: 504, T: -503000}, {V: 503, T: -502000}, {V: 502, T: -501000}, {V: 501, T: -500000}}, + Floats: []FPoint{{F: 504, T: -503000}, {F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}}, Metric: lblsneg, }, }, @@ -1515,7 +1516,7 @@ load 1ms start: 100, result: Matrix{ Series{ - Points: []Point{{V: 2342, T: 2342}, {V: 2343, T: 2343}, {V: 2344, T: 2344}, {V: 2345, T: 2345}}, + Floats: []FPoint{{F: 2342, T: 2342}, {F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}}, Metric: lblsms, }, }, @@ 
-1524,11 +1525,11 @@ load 1ms start: 100, result: Matrix{ Series{ - Points: []Point{{V: 20, T: 200000}, {V: 22, T: 225000}, {V: 25, T: 250000}, {V: 27, T: 275000}, {V: 30, T: 300000}}, + Floats: []FPoint{{F: 20, T: 200000}, {F: 22, T: 225000}, {F: 25, T: 250000}, {F: 27, T: 275000}, {F: 30, T: 300000}}, Metric: lbls1, }, Series{ - Points: []Point{{V: 40, T: 200000}, {V: 44, T: 225000}, {V: 50, T: 250000}, {V: 54, T: 275000}, {V: 60, T: 300000}}, + Floats: []FPoint{{F: 40, T: 200000}, {F: 44, T: 225000}, {F: 50, T: 250000}, {F: 54, T: 275000}, {F: 60, T: 300000}}, Metric: lbls2, }, }, @@ -1537,7 +1538,7 @@ load 1ms start: 100, result: Matrix{ Series{ - Points: []Point{{V: 51, T: -50000}, {V: 26, T: -25000}, {V: 1, T: 0}}, + Floats: []FPoint{{F: 51, T: -50000}, {F: 26, T: -25000}, {F: 1, T: 0}}, Metric: lblsneg, }, }, @@ -1546,7 +1547,7 @@ load 1ms start: 100, result: Matrix{ Series{ - Points: []Point{{V: 151, T: -150000}, {V: 126, T: -125000}, {V: 101, T: -100000}}, + Floats: []FPoint{{F: 151, T: -150000}, {F: 126, T: -125000}, {F: 101, T: -100000}}, Metric: lblsneg, }, }, @@ -1555,7 +1556,7 @@ load 1ms start: 100, result: Matrix{ Series{ - Points: []Point{{V: 2250, T: 2250}, {V: 2275, T: 2275}, {V: 2300, T: 2300}, {V: 2325, T: 2325}}, + Floats: []FPoint{{F: 2250, T: 2250}, {F: 2275, T: 2275}, {F: 2300, T: 2300}, {F: 2325, T: 2325}}, Metric: lblsms, }, }, @@ -1564,7 +1565,7 @@ load 1ms start: 50, end: 80, interval: 10, result: Matrix{ Series{ - Points: []Point{{V: 995, T: 50000}, {V: 994, T: 60000}, {V: 993, T: 70000}, {V: 992, T: 80000}}, + Floats: []FPoint{{F: 995, T: 50000}, {F: 994, T: 60000}, {F: 993, T: 70000}, {F: 992, T: 80000}}, Metric: lblstopk3, }, }, @@ -1573,7 +1574,7 @@ load 1ms start: 50, end: 80, interval: 10, result: Matrix{ Series{ - Points: []Point{{V: 10, T: 50000}, {V: 12, T: 60000}, {V: 14, T: 70000}, {V: 16, T: 80000}}, + Floats: []FPoint{{F: 10, T: 50000}, {F: 12, T: 60000}, {F: 14, T: 70000}, {F: 16, T: 80000}}, Metric: lblstopk2, }, }, @@ 
-1582,7 +1583,7 @@ load 1ms start: 70, end: 100, interval: 10, result: Matrix{ Series{ - Points: []Point{{V: 993, T: 70000}, {V: 992, T: 80000}, {V: 991, T: 90000}, {V: 990, T: 100000}}, + Floats: []FPoint{{F: 993, T: 70000}, {F: 992, T: 80000}, {F: 991, T: 90000}, {F: 990, T: 100000}}, Metric: lblstopk3, }, }, @@ -1591,7 +1592,7 @@ load 1ms start: 100, end: 130, interval: 10, result: Matrix{ Series{ - Points: []Point{{V: 990, T: 100000}, {V: 989, T: 110000}, {V: 988, T: 120000}, {V: 987, T: 130000}}, + Floats: []FPoint{{F: 990, T: 100000}, {F: 989, T: 110000}, {F: 988, T: 120000}, {F: 987, T: 130000}}, Metric: lblstopk3, }, }, @@ -1602,15 +1603,15 @@ load 1ms start: 0, end: 7 * 60, interval: 60, result: Matrix{ Series{ - Points: []Point{ - {V: 3600, T: 0}, - {V: 3600, T: 60 * 1000}, - {V: 3600, T: 2 * 60 * 1000}, - {V: 3600, T: 3 * 60 * 1000}, - {V: 3600, T: 4 * 60 * 1000}, - {V: 3600, T: 5 * 60 * 1000}, - {V: 3600, T: 6 * 60 * 1000}, - {V: 3600, T: 7 * 60 * 1000}, + Floats: []FPoint{ + {F: 3600, T: 0}, + {F: 3600, T: 60 * 1000}, + {F: 3600, T: 2 * 60 * 1000}, + {F: 3600, T: 3 * 60 * 1000}, + {F: 3600, T: 4 * 60 * 1000}, + {F: 3600, T: 5 * 60 * 1000}, + {F: 3600, T: 6 * 60 * 1000}, + {F: 3600, T: 7 * 60 * 1000}, }, Metric: labels.EmptyLabels(), }, @@ -1723,7 +1724,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 2, T: 10000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1737,7 +1738,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1751,7 +1752,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 
2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1765,7 +1766,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1779,7 +1780,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}, {V: 2, T: 30000}}, + Floats: []FPoint{{F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1793,7 +1794,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}, {V: 2, T: 30000}}, + Floats: []FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1807,7 +1808,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}}, + Floats: []FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1821,7 +1822,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}}, + Floats: []FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1844,7 +1845,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 9990, T: 9990000}, {V: 10000, T: 10000000}, {V: 100, T: 10010000}, {V: 130, T: 10020000}}, + Floats: []FPoint{{F: 9990, T: 9990000}, {F: 10000, T: 10000000}, {F: 100, T: 10010000}, {F: 130, T: 10020000}}, 
Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), }, }, @@ -1858,7 +1859,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 9840, T: 9840000}, {V: 9900, T: 9900000}, {V: 9960, T: 9960000}, {V: 130, T: 10020000}, {V: 310, T: 10080000}}, + Floats: []FPoint{{F: 9840, T: 9840000}, {F: 9900, T: 9900000}, {F: 9960, T: 9960000}, {F: 130, T: 10020000}, {F: 310, T: 10080000}}, Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), }, }, @@ -1872,7 +1873,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 8640, T: 8640000}, {V: 8700, T: 8700000}, {V: 8760, T: 8760000}, {V: 8820, T: 8820000}, {V: 8880, T: 8880000}}, + Floats: []FPoint{{F: 8640, T: 8640000}, {F: 8700, T: 8700000}, {F: 8760, T: 8760000}, {F: 8820, T: 8820000}, {F: 8880, T: 8880000}}, Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), }, }, @@ -1886,19 +1887,19 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 3, T: 7985000}, {V: 3, T: 7990000}, {V: 3, T: 7995000}, {V: 3, T: 8000000}}, + Floats: []FPoint{{F: 3, T: 7985000}, {F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}}, Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"), }, Series{ - Points: []Point{{V: 4, T: 7985000}, {V: 4, T: 7990000}, {V: 4, T: 7995000}, {V: 4, T: 8000000}}, + Floats: []FPoint{{F: 4, T: 7985000}, {F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}}, Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "canary"), }, Series{ - Points: []Point{{V: 1, T: 7985000}, {V: 1, T: 7990000}, {V: 1, T: 7995000}, {V: 1, T: 8000000}}, + Floats: []FPoint{{F: 1, T: 7985000}, {F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}}, Metric: labels.FromStrings("job", 
"api-server", "instance", "0", "group", "production"), }, Series{ - Points: []Point{{V: 2, T: 7985000}, {V: 2, T: 7990000}, {V: 2, T: 7995000}, {V: 2, T: 8000000}}, + Floats: []FPoint{{F: 2, T: 7985000}, {F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}}, Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "production"), }, }, @@ -1912,7 +1913,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 270, T: 90000}, {V: 300, T: 100000}, {V: 330, T: 110000}, {V: 360, T: 120000}}, + Floats: []FPoint{{F: 270, T: 90000}, {F: 300, T: 100000}, {F: 330, T: 110000}, {F: 360, T: 120000}}, Metric: labels.EmptyLabels(), }, }, @@ -1926,7 +1927,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 800, T: 80000}, {V: 900, T: 90000}, {V: 1000, T: 100000}, {V: 1100, T: 110000}, {V: 1200, T: 120000}}, + Floats: []FPoint{{F: 800, T: 80000}, {F: 900, T: 90000}, {F: 1000, T: 100000}, {F: 1100, T: 110000}, {F: 1200, T: 120000}}, Metric: labels.EmptyLabels(), }, }, @@ -1940,7 +1941,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 1000, T: 100000}, {V: 1000, T: 105000}, {V: 1100, T: 110000}, {V: 1100, T: 115000}, {V: 1200, T: 120000}}, + Floats: []FPoint{{F: 1000, T: 100000}, {F: 1000, T: 105000}, {F: 1100, T: 110000}, {F: 1100, T: 115000}, {F: 1200, T: 120000}}, Metric: labels.EmptyLabels(), }, }, @@ -2996,7 +2997,7 @@ func TestRangeQuery(t *testing.T) { Query: "sum_over_time(bar[30s])", Result: Matrix{ Series{ - Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}}, + Floats: []FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}}, Metric: labels.EmptyLabels(), }, }, @@ -3011,7 +3012,7 @@ func TestRangeQuery(t *testing.T) { Query: "sum_over_time(bar[30s])", Result: Matrix{ Series{ - Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}}, + Floats: []FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 
120000}}, Metric: labels.EmptyLabels(), }, }, @@ -3026,7 +3027,7 @@ func TestRangeQuery(t *testing.T) { Query: "sum_over_time(bar[30s])", Result: Matrix{ Series{ - Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}, {V: 110000, T: 180000}, {V: 11000000, T: 240000}}, + Floats: []FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}, {F: 110000, T: 180000}, {F: 11000000, T: 240000}}, Metric: labels.EmptyLabels(), }, }, @@ -3041,7 +3042,7 @@ func TestRangeQuery(t *testing.T) { Query: "sum_over_time(bar[30s])", Result: Matrix{ Series{ - Points: []Point{{V: 5, T: 0}, {V: 59, T: 60000}, {V: 9, T: 120000}, {V: 956, T: 180000}}, + Floats: []FPoint{{F: 5, T: 0}, {F: 59, T: 60000}, {F: 9, T: 120000}, {F: 956, T: 180000}}, Metric: labels.EmptyLabels(), }, }, @@ -3056,7 +3057,7 @@ func TestRangeQuery(t *testing.T) { Query: "metric", Result: Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -3071,7 +3072,7 @@ func TestRangeQuery(t *testing.T) { Query: "metric", Result: Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -3087,14 +3088,14 @@ func TestRangeQuery(t *testing.T) { Query: `foo > 2 or bar`, Result: Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}}, Metric: labels.FromStrings( "__name__", "bar", "job", "2", ), }, Series{ - Points: []Point{{V: 3, T: 60000}, {V: 5, T: 120000}}, + Floats: []FPoint{{F: 3, T: 60000}, {F: 5, T: 120000}}, Metric: labels.FromStrings( "__name__", "foo", "job", "1", @@ -3266,9 +3267,9 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { require.Len(t, vector, 1) require.Nil(t, 
vector[0].H) if floatHisto { - require.Equal(t, float64(h.ToFloat().Count), vector[0].V) + require.Equal(t, float64(h.ToFloat().Count), vector[0].F) } else { - require.Equal(t, float64(h.Count), vector[0].V) + require.Equal(t, float64(h.Count), vector[0].F) } queryString = fmt.Sprintf("histogram_sum(%s)", seriesName) @@ -3284,9 +3285,9 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { require.Len(t, vector, 1) require.Nil(t, vector[0].H) if floatHisto { - require.Equal(t, h.ToFloat().Sum, vector[0].V) + require.Equal(t, h.ToFloat().Sum, vector[0].F) } else { - require.Equal(t, h.Sum, vector[0].V) + require.Equal(t, h.Sum, vector[0].F) } }) } @@ -3519,7 +3520,7 @@ func TestNativeHistogram_HistogramQuantile(t *testing.T) { require.Len(t, vector, 1) require.Nil(t, vector[0].H) - require.True(t, almostEqual(sc.value, vector[0].V)) + require.True(t, almostEqual(sc.value, vector[0].F)) }) } idx++ @@ -3951,10 +3952,10 @@ func TestNativeHistogram_HistogramFraction(t *testing.T) { require.Len(t, vector, 1) require.Nil(t, vector[0].H) if math.IsNaN(sc.value) { - require.True(t, math.IsNaN(vector[0].V)) + require.True(t, math.IsNaN(vector[0].F)) return } - require.Equal(t, sc.value, vector[0].V) + require.Equal(t, sc.value, vector[0].F) }) } idx++ @@ -4090,24 +4091,18 @@ func TestNativeHistogram_Sum_Count_AddOperator(t *testing.T) { // sum(). queryString := fmt.Sprintf("sum(%s)", seriesName) - queryAndCheck(queryString, []Sample{ - {Point{T: ts, H: &c.expected}, labels.EmptyLabels()}, - }) + queryAndCheck(queryString, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) // + operator. 
queryString = fmt.Sprintf(`%s{idx="0"}`, seriesName) for idx := 1; idx < len(c.histograms); idx++ { queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx) } - queryAndCheck(queryString, []Sample{ - {Point{T: ts, H: &c.expected}, labels.EmptyLabels()}, - }) + queryAndCheck(queryString, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) // count(). queryString = fmt.Sprintf("count(%s)", seriesName) - queryAndCheck(queryString, []Sample{ - {Point{T: ts, V: 3}, labels.EmptyLabels()}, - }) + queryAndCheck(queryString, []Sample{{T: ts, F: 3, Metric: labels.EmptyLabels()}}) }) idx0++ } diff --git a/promql/functions.go b/promql/functions.go index e1a02ac8e..8fc5e64a5 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -54,9 +54,9 @@ type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNo // === time() float64 === func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return Vector{Sample{Point: Point{ - V: float64(enh.Ts) / 1000, - }}} + return Vector{Sample{ + F: float64(enh.Ts) / 1000, + }} } // extrapolatedRate is a utility function for rate/increase/delta. @@ -67,65 +67,71 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod ms := args[0].(*parser.MatrixSelector) vs := ms.VectorSelector.(*parser.VectorSelector) var ( - samples = vals[0].(Matrix)[0] - rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset) - rangeEnd = enh.Ts - durationMilliseconds(vs.Offset) - resultValue float64 - resultHistogram *histogram.FloatHistogram + samples = vals[0].(Matrix)[0] + rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset) + rangeEnd = enh.Ts - durationMilliseconds(vs.Offset) + resultValue float64 + resultHistogram *histogram.FloatHistogram + firstT, lastT int64 + numSamplesMinusOne int ) - // No sense in trying to compute a rate without at least two points. Drop - // this Vector element. 
- if len(samples.Points) < 2 { + // We need either at least two Histograms and no Floats, or at least two + // Floats and no Histograms to calculate a rate. Otherwise, drop this + // Vector element. + if len(samples.Histograms) > 0 && len(samples.Floats) > 0 { + // Mix of histograms and floats. TODO(beorn7): Communicate this failure reason. return enh.Out } - if samples.Points[0].H != nil { - resultHistogram = histogramRate(samples.Points, isCounter) + switch { + case len(samples.Histograms) > 1: + numSamplesMinusOne = len(samples.Histograms) - 1 + firstT = samples.Histograms[0].T + lastT = samples.Histograms[numSamplesMinusOne].T + resultHistogram = histogramRate(samples.Histograms, isCounter) if resultHistogram == nil { - // Points are a mix of floats and histograms, or the histograms - // are not compatible with each other. - // TODO(beorn7): find a way of communicating the exact reason + // The histograms are not compatible with each other. + // TODO(beorn7): Communicate this failure reason. return enh.Out } - } else { - resultValue = samples.Points[len(samples.Points)-1].V - samples.Points[0].V - prevValue := samples.Points[0].V - // We have to iterate through everything even in the non-counter - // case because we have to check that everything is a float. - // TODO(beorn7): Find a way to check that earlier, e.g. by - // handing in a []FloatPoint and a []HistogramPoint separately. - for _, currPoint := range samples.Points[1:] { - if currPoint.H != nil { - return nil // Range contains a mix of histograms and floats. 
- } - if !isCounter { - continue - } - if currPoint.V < prevValue { + case len(samples.Floats) > 1: + numSamplesMinusOne = len(samples.Floats) - 1 + firstT = samples.Floats[0].T + lastT = samples.Floats[numSamplesMinusOne].T + resultValue = samples.Floats[numSamplesMinusOne].F - samples.Floats[0].F + if !isCounter { + break + } + // Handle counter resets: + prevValue := samples.Floats[0].F + for _, currPoint := range samples.Floats[1:] { + if currPoint.F < prevValue { resultValue += prevValue } - prevValue = currPoint.V + prevValue = currPoint.F } + default: + // Not enough samples. TODO(beorn7): Communicate this failure reason. + return enh.Out } // Duration between first/last samples and boundary of range. - durationToStart := float64(samples.Points[0].T-rangeStart) / 1000 - durationToEnd := float64(rangeEnd-samples.Points[len(samples.Points)-1].T) / 1000 + durationToStart := float64(firstT-rangeStart) / 1000 + durationToEnd := float64(rangeEnd-lastT) / 1000 - sampledInterval := float64(samples.Points[len(samples.Points)-1].T-samples.Points[0].T) / 1000 - averageDurationBetweenSamples := sampledInterval / float64(len(samples.Points)-1) + sampledInterval := float64(lastT-firstT) / 1000 + averageDurationBetweenSamples := sampledInterval / float64(numSamplesMinusOne) // TODO(beorn7): Do this for histograms, too. - if isCounter && resultValue > 0 && samples.Points[0].V >= 0 { - // Counters cannot be negative. If we have any slope at - // all (i.e. resultValue went up), we can extrapolate - // the zero point of the counter. If the duration to the - // zero point is shorter than the durationToStart, we - // take the zero point as the start of the series, - // thereby avoiding extrapolation to negative counter - // values. - durationToZero := sampledInterval * (samples.Points[0].V / resultValue) + if isCounter && resultValue > 0 && len(samples.Floats) > 0 && samples.Floats[0].F >= 0 { + // Counters cannot be negative. If we have any slope at all + // (i.e. 
resultValue went up), we can extrapolate the zero point + // of the counter. If the duration to the zero point is shorter + // than the durationToStart, we take the zero point as the start + // of the series, thereby avoiding extrapolation to negative + // counter values. + durationToZero := sampledInterval * (samples.Floats[0].F / resultValue) if durationToZero < durationToStart { durationToStart = durationToZero } @@ -158,16 +164,14 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod resultHistogram.Scale(factor) } - return append(enh.Out, Sample{ - Point: Point{V: resultValue, H: resultHistogram}, - }) + return append(enh.Out, Sample{F: resultValue, H: resultHistogram}) } // histogramRate is a helper function for extrapolatedRate. It requires // points[0] to be a histogram. It returns nil if any other Point in points is // not a histogram. -func histogramRate(points []Point, isCounter bool) *histogram.FloatHistogram { - prev := points[0].H // We already know that this is a histogram. +func histogramRate(points []HPoint, isCounter bool) *histogram.FloatHistogram { + prev := points[0].H last := points[len(points)-1].H if last == nil { return nil // Range contains a mix of histograms and floats. @@ -243,19 +247,19 @@ func instantValue(vals []parser.Value, out Vector, isRate bool) Vector { samples := vals[0].(Matrix)[0] // No sense in trying to compute a rate without at least two points. Drop // this Vector element. - if len(samples.Points) < 2 { + if len(samples.Floats) < 2 { return out } - lastSample := samples.Points[len(samples.Points)-1] - previousSample := samples.Points[len(samples.Points)-2] + lastSample := samples.Floats[len(samples.Floats)-1] + previousSample := samples.Floats[len(samples.Floats)-2] var resultValue float64 - if isRate && lastSample.V < previousSample.V { + if isRate && lastSample.F < previousSample.F { // Counter reset. 
- resultValue = lastSample.V + resultValue = lastSample.F } else { - resultValue = lastSample.V - previousSample.V + resultValue = lastSample.F - previousSample.F } sampledInterval := lastSample.T - previousSample.T @@ -269,9 +273,7 @@ func instantValue(vals []parser.Value, out Vector, isRate bool) Vector { resultValue /= float64(sampledInterval) / 1000 } - return append(out, Sample{ - Point: Point{V: resultValue}, - }) + return append(out, Sample{F: resultValue}) } // Calculate the trend value at the given index i in raw data d. @@ -300,10 +302,10 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode samples := vals[0].(Matrix)[0] // The smoothing factor argument. - sf := vals[1].(Vector)[0].V + sf := vals[1].(Vector)[0].F // The trend factor argument. - tf := vals[2].(Vector)[0].V + tf := vals[2].(Vector)[0].F // Check that the input parameters are valid. if sf <= 0 || sf >= 1 { @@ -313,7 +315,7 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode panic(fmt.Errorf("invalid trend factor. Expected: 0 < tf < 1, got: %f", tf)) } - l := len(samples.Points) + l := len(samples.Floats) // Can't do the smoothing operation with less than two points. if l < 2 { @@ -322,15 +324,15 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode var s0, s1, b float64 // Set initial values. - s1 = samples.Points[0].V - b = samples.Points[1].V - samples.Points[0].V + s1 = samples.Floats[0].F + b = samples.Floats[1].F - samples.Floats[0].F // Run the smoothing operation. var x, y float64 for i := 1; i < l; i++ { // Scale the raw value against the smoothing factor. - x = sf * samples.Points[i].V + x = sf * samples.Floats[i].F // Scale the last smoothed value with the trend at this point. 
b = calcTrendValue(i-1, tf, s0, s1, b) @@ -339,9 +341,7 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode s0, s1 = s1, x+y } - return append(enh.Out, Sample{ - Point: Point{V: s1}, - }) + return append(enh.Out, Sample{F: s1}) } // === sort(node parser.ValueTypeVector) Vector === @@ -365,15 +365,15 @@ func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel // === clamp(Vector parser.ValueTypeVector, min, max Scalar) Vector === func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { vec := vals[0].(Vector) - min := vals[1].(Vector)[0].Point.V - max := vals[2].(Vector)[0].Point.V + min := vals[1].(Vector)[0].F + max := vals[2].(Vector)[0].F if max < min { return enh.Out } for _, el := range vec { enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: Point{V: math.Max(min, math.Min(max, el.V))}, + F: math.Max(min, math.Min(max, el.F)), }) } return enh.Out @@ -382,11 +382,11 @@ func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper // === clamp_max(Vector parser.ValueTypeVector, max Scalar) Vector === func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { vec := vals[0].(Vector) - max := vals[1].(Vector)[0].Point.V + max := vals[1].(Vector)[0].F for _, el := range vec { enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: Point{V: math.Min(max, el.V)}, + F: math.Min(max, el.F), }) } return enh.Out @@ -395,11 +395,11 @@ func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel // === clamp_min(Vector parser.ValueTypeVector, min Scalar) Vector === func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { vec := vals[0].(Vector) - min := vals[1].(Vector)[0].Point.V + min := vals[1].(Vector)[0].F for _, el := range vec { enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: 
Point{V: math.Max(min, el.V)}, + F: math.Max(min, el.F), }) } return enh.Out @@ -412,16 +412,16 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper // Ties are solved by rounding up. toNearest := float64(1) if len(args) >= 2 { - toNearest = vals[1].(Vector)[0].Point.V + toNearest = vals[1].(Vector)[0].F } // Invert as it seems to cause fewer floating point accuracy issues. toNearestInverse := 1.0 / toNearest for _, el := range vec { - v := math.Floor(el.V*toNearestInverse+0.5) / toNearestInverse + v := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: Point{V: v}, + F: v, }) } return enh.Out @@ -431,37 +431,38 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper func funcScalar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { v := vals[0].(Vector) if len(v) != 1 { - return append(enh.Out, Sample{ - Point: Point{V: math.NaN()}, - }) + return append(enh.Out, Sample{F: math.NaN()}) } - return append(enh.Out, Sample{ - Point: Point{V: v[0].V}, - }) + return append(enh.Out, Sample{F: v[0].F}) } -func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func([]Point) float64) Vector { +func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector { el := vals[0].(Matrix)[0] - return append(enh.Out, Sample{ - Point: Point{V: aggrFn(el.Points)}, - }) + return append(enh.Out, Sample{F: aggrFn(el)}) } // === avg_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { + if len(vals[0].(Matrix)[0].Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. avg_over_time ignores histograms for now. 
If + // there are only histograms, we have to return without adding + // anything to enh.Out. + return enh.Out + } + return aggrOverTime(vals, enh, func(s Series) float64 { var mean, count, c float64 - for _, v := range values { + for _, f := range s.Floats { count++ if math.IsInf(mean, 0) { - if math.IsInf(v.V, 0) && (mean > 0) == (v.V > 0) { - // The `mean` and `v.V` values are `Inf` of the same sign. They + if math.IsInf(f.F, 0) && (mean > 0) == (f.F > 0) { + // The `mean` and `f.F` values are `Inf` of the same sign. They // can't be subtracted, but the value of `mean` is correct // already. continue } - if !math.IsInf(v.V, 0) && !math.IsNaN(v.V) { + if !math.IsInf(f.F, 0) && !math.IsNaN(f.F) { // At this stage, the mean is an infinite. If the added // value is neither an Inf or a Nan, we can keep that mean // value. @@ -471,7 +472,7 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode continue } } - mean, c = kahanSumInc(v.V/count-mean/count, mean, c) + mean, c = kahanSumInc(f.F/count-mean/count, mean, c) } if math.IsInf(mean, 0) { @@ -483,8 +484,8 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode // === count_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { - return float64(len(values)) + return aggrOverTime(vals, enh, func(s Series) float64 { + return float64(len(s.Floats) + len(s.Histograms)) }) } @@ -492,19 +493,42 @@ func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNo func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { el := vals[0].(Matrix)[0] + var f FPoint + if len(el.Floats) > 0 { + f = el.Floats[len(el.Floats)-1] + } + + var h HPoint + if len(el.Histograms) > 0 { + h = el.Histograms[len(el.Histograms)-1] + } + + if h.H == nil || h.T < f.T { + return 
append(enh.Out, Sample{ + Metric: el.Metric, + F: f.F, + }) + } return append(enh.Out, Sample{ Metric: el.Metric, - Point: Point{V: el.Points[len(el.Points)-1].V}, + H: h.H, }) } // === max_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { - max := values[0].V - for _, v := range values { - if v.V > max || math.IsNaN(max) { - max = v.V + if len(vals[0].(Matrix)[0].Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. max_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. + return enh.Out + } + return aggrOverTime(vals, enh, func(s Series) float64 { + max := s.Floats[0].F + for _, f := range s.Floats { + if f.F > max || math.IsNaN(max) { + max = f.F } } return max @@ -513,11 +537,18 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode // === min_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { - min := values[0].V - for _, v := range values { - if v.V < min || math.IsNaN(min) { - min = v.V + if len(vals[0].(Matrix)[0].Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. min_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. 
+ return enh.Out + } + return aggrOverTime(vals, enh, func(s Series) float64 { + min := s.Floats[0].F + for _, f := range s.Floats { + if f.F < min || math.IsNaN(min) { + min = f.F } } return min @@ -526,10 +557,17 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode // === sum_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { + if len(vals[0].(Matrix)[0].Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. sum_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. + return enh.Out + } + return aggrOverTime(vals, enh, func(s Series) float64 { var sum, c float64 - for _, v := range values { - sum, c = kahanSumInc(v.V, sum, c) + for _, f := range s.Floats { + sum, c = kahanSumInc(f.F, sum, c) } if math.IsInf(sum, 0) { return sum @@ -540,29 +578,41 @@ func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode // === quantile_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - q := vals[0].(Vector)[0].V + q := vals[0].(Vector)[0].F el := vals[1].(Matrix)[0] - - values := make(vectorByValueHeap, 0, len(el.Points)) - for _, v := range el.Points { - values = append(values, Sample{Point: Point{V: v.V}}) + if len(el.Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. quantile_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. 
+ return enh.Out } - return append(enh.Out, Sample{ - Point: Point{V: quantile(q, values)}, - }) + + values := make(vectorByValueHeap, 0, len(el.Floats)) + for _, f := range el.Floats { + values = append(values, Sample{F: f.F}) + } + return append(enh.Out, Sample{F: quantile(q, values)}) } // === stddev_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { + if len(vals[0].(Matrix)[0].Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. stddev_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. + return enh.Out + } + return aggrOverTime(vals, enh, func(s Series) float64 { var count float64 var mean, cMean float64 var aux, cAux float64 - for _, v := range values { + for _, f := range s.Floats { count++ - delta := v.V - (mean + cMean) + delta := f.F - (mean + cMean) mean, cMean = kahanSumInc(delta/count, mean, cMean) - aux, cAux = kahanSumInc(delta*(v.V-(mean+cMean)), aux, cAux) + aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux) } return math.Sqrt((aux + cAux) / count) }) @@ -570,15 +620,22 @@ func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN // === stdvar_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { + if len(vals[0].(Matrix)[0].Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. stdvar_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. 
+ return enh.Out + } + return aggrOverTime(vals, enh, func(s Series) float64 { var count float64 var mean, cMean float64 var aux, cAux float64 - for _, v := range values { + for _, f := range s.Floats { count++ - delta := v.V - (mean + cMean) + delta := f.F - (mean + cMean) mean, cMean = kahanSumInc(delta/count, mean, cMean) - aux, cAux = kahanSumInc(delta*(v.V-(mean+cMean)), aux, cAux) + aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux) } return (aux + cAux) / count }) @@ -592,7 +649,7 @@ func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe return append(enh.Out, Sample{ Metric: createLabelsForAbsentFunction(args[0]), - Point: Point{V: 1}, + F: 1, }) } @@ -602,25 +659,24 @@ func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe // Due to engine optimization, this function is only called when this condition is true. // Then, the engine post-processes the results to get the expected output. func funcAbsentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return append(enh.Out, - Sample{ - Point: Point{V: 1}, - }) + return append(enh.Out, Sample{F: 1}) } // === present_over_time(Vector parser.ValueTypeMatrix) Vector === func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { + return aggrOverTime(vals, enh, func(s Series) float64 { return 1 }) } func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector { for _, el := range vals[0].(Vector) { - enh.Out = append(enh.Out, Sample{ - Metric: enh.DropMetricName(el.Metric), - Point: Point{V: f(el.V)}, - }) + if el.H == nil { // Process only float samples. 
+ enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(el.Metric), + F: f(el.F), + }) + } } return enh.Out } @@ -741,9 +797,7 @@ func funcDeg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) // === pi() Scalar === func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return Vector{Sample{Point: Point{ - V: math.Pi, - }}} + return Vector{Sample{F: math.Pi}} } // === sgn(Vector parser.ValueTypeVector) Vector === @@ -764,7 +818,7 @@ func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe for _, el := range vec { enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: Point{V: float64(el.T) / 1000}, + F: float64(el.T) / 1000, }) } return enh.Out @@ -793,7 +847,7 @@ func kahanSumInc(inc, sum, c float64) (newSum, newC float64) { // linearRegression performs a least-square linear regression analysis on the // provided SamplePairs. It returns the slope, and the intercept value at the // provided time. -func linearRegression(samples []Point, interceptTime int64) (slope, intercept float64) { +func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept float64) { var ( n float64 sumX, cX float64 @@ -803,18 +857,18 @@ func linearRegression(samples []Point, interceptTime int64) (slope, intercept fl initY float64 constY bool ) - initY = samples[0].V + initY = samples[0].F constY = true for i, sample := range samples { // Set constY to false if any new y values are encountered. 
- if constY && i > 0 && sample.V != initY { + if constY && i > 0 && sample.F != initY { constY = false } n += 1.0 x := float64(sample.T-interceptTime) / 1e3 sumX, cX = kahanSumInc(x, sumX, cX) - sumY, cY = kahanSumInc(sample.V, sumY, cY) - sumXY, cXY = kahanSumInc(x*sample.V, sumXY, cXY) + sumY, cY = kahanSumInc(sample.F, sumY, cY) + sumXY, cXY = kahanSumInc(x*sample.F, sumXY, cXY) sumX2, cX2 = kahanSumInc(x*x, sumX2, cX2) } if constY { @@ -842,33 +896,29 @@ func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper // No sense in trying to compute a derivative without at least two points. // Drop this Vector element. - if len(samples.Points) < 2 { + if len(samples.Floats) < 2 { return enh.Out } // We pass in an arbitrary timestamp that is near the values in use // to avoid floating point accuracy issues, see // https://github.com/prometheus/prometheus/issues/2674 - slope, _ := linearRegression(samples.Points, samples.Points[0].T) - return append(enh.Out, Sample{ - Point: Point{V: slope}, - }) + slope, _ := linearRegression(samples.Floats, samples.Floats[0].T) + return append(enh.Out, Sample{F: slope}) } // === predict_linear(node parser.ValueTypeMatrix, k parser.ValueTypeScalar) Vector === func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { samples := vals[0].(Matrix)[0] - duration := vals[1].(Vector)[0].V + duration := vals[1].(Vector)[0].F // No sense in trying to predict anything without at least two points. // Drop this Vector element. 
- if len(samples.Points) < 2 { + if len(samples.Floats) < 2 { return enh.Out } - slope, intercept := linearRegression(samples.Points, enh.Ts) + slope, intercept := linearRegression(samples.Floats, enh.Ts) - return append(enh.Out, Sample{ - Point: Point{V: slope*duration + intercept}, - }) + return append(enh.Out, Sample{F: slope*duration + intercept}) } // === histogram_count(Vector parser.ValueTypeVector) Vector === @@ -882,7 +932,7 @@ func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalN } enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(sample.Metric), - Point: Point{V: sample.H.Count}, + F: sample.H.Count, }) } return enh.Out @@ -899,7 +949,7 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod } enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(sample.Metric), - Point: Point{V: sample.H.Sum}, + F: sample.H.Sum, }) } return enh.Out @@ -907,8 +957,8 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod // === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - lower := vals[0].(Vector)[0].V - upper := vals[1].(Vector)[0].V + lower := vals[0].(Vector)[0].F + upper := vals[1].(Vector)[0].F inVec := vals[2].(Vector) for _, sample := range inVec { @@ -918,7 +968,7 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev } enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(sample.Metric), - Point: Point{V: histogramFraction(lower, upper, sample.H)}, + F: histogramFraction(lower, upper, sample.H), }) } return enh.Out @@ -926,7 +976,7 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev // === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === func funcHistogramQuantile(vals []parser.Value, args 
parser.Expressions, enh *EvalNodeHelper) Vector { - q := vals[0].(Vector)[0].V + q := vals[0].(Vector)[0].F inVec := vals[1].(Vector) if enh.signatureToMetricWithBuckets == nil { @@ -965,7 +1015,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev mb = &metricWithBuckets{sample.Metric, nil} enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb } - mb.buckets = append(mb.buckets, bucket{upperBound, sample.V}) + mb.buckets = append(mb.buckets, bucket{upperBound, sample.F}) } @@ -985,7 +1035,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(sample.Metric), - Point: Point{V: histogramQuantile(q, sample.H)}, + F: histogramQuantile(q, sample.H), }) } @@ -993,7 +1043,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev if len(mb.buckets) > 0 { enh.Out = append(enh.Out, Sample{ Metric: mb.metric, - Point: Point{V: bucketQuantile(q, mb.buckets)}, + F: bucketQuantile(q, mb.buckets), }) } } @@ -1003,40 +1053,55 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev // === resets(Matrix parser.ValueTypeMatrix) Vector === func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - samples := vals[0].(Matrix)[0] - + floats := vals[0].(Matrix)[0].Floats + histograms := vals[0].(Matrix)[0].Histograms resets := 0 - prev := samples.Points[0].V - for _, sample := range samples.Points[1:] { - current := sample.V - if current < prev { - resets++ + + if len(floats) > 1 { + prev := floats[0].F + for _, sample := range floats[1:] { + current := sample.F + if current < prev { + resets++ + } + prev = current } - prev = current } - return append(enh.Out, Sample{ - Point: Point{V: float64(resets)}, - }) + if len(histograms) > 1 { + prev := histograms[0].H + for _, sample := range histograms[1:] { + current := sample.H + if current.DetectReset(prev) { + resets++ + } + 
prev = current + } + } + + return append(enh.Out, Sample{F: float64(resets)}) } // === changes(Matrix parser.ValueTypeMatrix) Vector === func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - samples := vals[0].(Matrix)[0] - + floats := vals[0].(Matrix)[0].Floats changes := 0 - prev := samples.Points[0].V - for _, sample := range samples.Points[1:] { - current := sample.V + + if len(floats) == 0 { + // TODO(beorn7): Only histogram values, still need to add support. + return enh.Out + } + + prev := floats[0].F + for _, sample := range floats[1:] { + current := sample.F if current != prev && !(math.IsNaN(current) && math.IsNaN(prev)) { changes++ } prev = current } - return append(enh.Out, Sample{ - Point: Point{V: float64(changes)}, - }) + return append(enh.Out, Sample{F: float64(changes)}) } // === label_replace(Vector parser.ValueTypeVector, dst_label, replacement, src_labelname, regex parser.ValueTypeString) Vector === @@ -1087,7 +1152,8 @@ func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNod enh.Out = append(enh.Out, Sample{ Metric: outMetric, - Point: Point{V: el.Point.V}, + F: el.F, + H: el.H, }) } return enh.Out @@ -1098,7 +1164,7 @@ func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe return append(enh.Out, Sample{ Metric: labels.Labels{}, - Point: Point{V: vals[0].(Vector)[0].V}, + F: vals[0].(Vector)[0].F, }) } @@ -1154,7 +1220,8 @@ func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe enh.Out = append(enh.Out, Sample{ Metric: outMetric, - Point: Point{V: el.Point.V}, + F: el.F, + H: el.H, }) } return enh.Out @@ -1166,15 +1233,15 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo return append(enh.Out, Sample{ Metric: labels.Labels{}, - Point: Point{V: f(time.Unix(enh.Ts/1000, 0).UTC())}, + F: f(time.Unix(enh.Ts/1000, 0).UTC()), }) } for _, el := range vals[0].(Vector) { - t := time.Unix(int64(el.V), 
0).UTC() + t := time.Unix(int64(el.F), 0).UTC() enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: Point{V: f(t)}, + F: f(t), }) } return enh.Out @@ -1332,10 +1399,20 @@ func (s vectorByValueHeap) Len() int { } func (s vectorByValueHeap) Less(i, j int) bool { - if math.IsNaN(s[i].V) { + // We compare histograms based on their sum of observations. + // TODO(beorn7): Is that what we want? + vi, vj := s[i].F, s[j].F + if s[i].H != nil { + vi = s[i].H.Sum + } + if s[j].H != nil { + vj = s[j].H.Sum + } + + if math.IsNaN(vi) { return true } - return s[i].V < s[j].V + return vi < vj } func (s vectorByValueHeap) Swap(i, j int) { @@ -1361,10 +1438,20 @@ func (s vectorByReverseValueHeap) Len() int { } func (s vectorByReverseValueHeap) Less(i, j int) bool { - if math.IsNaN(s[i].V) { + // We compare histograms based on their sum of observations. + // TODO(beorn7): Is that what we want? + vi, vj := s[i].F, s[j].F + if s[i].H != nil { + vi = s[i].H.Sum + } + if s[j].H != nil { + vj = s[j].H.Sum + } + + if math.IsNaN(vi) { return true } - return s[i].V > s[j].V + return vi > vj } func (s vectorByReverseValueHeap) Swap(i, j int) { diff --git a/promql/functions_test.go b/promql/functions_test.go index 1148b1339..e552424b3 100644 --- a/promql/functions_test.go +++ b/promql/functions_test.go @@ -64,7 +64,7 @@ func TestDeriv(t *testing.T) { vec, _ := result.Vector() require.Equal(t, 1, len(vec), "Expected 1 result, got %d", len(vec)) - require.Equal(t, 0.0, vec[0].V, "Expected 0.0 as value, got %f", vec[0].V) + require.Equal(t, 0.0, vec[0].F, "Expected 0.0 as value, got %f", vec[0].F) } func TestFunctionList(t *testing.T) { diff --git a/promql/quantile.go b/promql/quantile.go index 1561a2ce8..aaead671c 100644 --- a/promql/quantile.go +++ b/promql/quantile.go @@ -382,5 +382,5 @@ func quantile(q float64, values vectorByValueHeap) float64 { upperIndex := math.Min(n-1, lowerIndex+1) weight := rank - math.Floor(rank) - return 
values[int(lowerIndex)].V*(1-weight) + values[int(upperIndex)].V*weight + return values[int(lowerIndex)].F*(1-weight) + values[int(upperIndex)].F*weight } diff --git a/promql/test.go b/promql/test.go index 78cc1e9fb..0b3958393 100644 --- a/promql/test.go +++ b/promql/test.go @@ -281,7 +281,7 @@ func (*evalCmd) testCmd() {} type loadCmd struct { gap time.Duration metrics map[uint64]labels.Labels - defs map[uint64][]Point + defs map[uint64][]FPoint exemplars map[uint64][]exemplar.Exemplar } @@ -289,7 +289,7 @@ func newLoadCmd(gap time.Duration) *loadCmd { return &loadCmd{ gap: gap, metrics: map[uint64]labels.Labels{}, - defs: map[uint64][]Point{}, + defs: map[uint64][]FPoint{}, exemplars: map[uint64][]exemplar.Exemplar{}, } } @@ -302,13 +302,13 @@ func (cmd loadCmd) String() string { func (cmd *loadCmd) set(m labels.Labels, vals ...parser.SequenceValue) { h := m.Hash() - samples := make([]Point, 0, len(vals)) + samples := make([]FPoint, 0, len(vals)) ts := testStartTime for _, v := range vals { if !v.Omitted { - samples = append(samples, Point{ + samples = append(samples, FPoint{ T: ts.UnixNano() / int64(time.Millisecond/time.Nanosecond), - V: v.Value, + F: v.Value, }) } ts = ts.Add(cmd.gap) @@ -323,7 +323,7 @@ func (cmd *loadCmd) append(a storage.Appender) error { m := cmd.metrics[h] for _, s := range smpls { - if _, err := a.Append(0, m, s.T, s.V); err != nil { + if _, err := a.Append(0, m, s.T, s.F); err != nil { return err } } @@ -399,8 +399,8 @@ func (ev *evalCmd) compareResult(result parser.Value) error { if ev.ordered && exp.pos != pos+1 { return fmt.Errorf("expected metric %s with %v at position %d but was at %d", v.Metric, exp.vals, exp.pos, pos+1) } - if !almostEqual(exp.vals[0].Value, v.V) { - return fmt.Errorf("expected %v for %s but got %v", exp.vals[0].Value, v.Metric, v.V) + if !almostEqual(exp.vals[0].Value, v.F) { + return fmt.Errorf("expected %v for %s but got %v", exp.vals[0].Value, v.Metric, v.F) } seen[fp] = true @@ -409,7 +409,7 @@ func (ev 
*evalCmd) compareResult(result parser.Value) error { if !seen[fp] { fmt.Println("vector result", len(val), ev.expr) for _, ss := range val { - fmt.Println(" ", ss.Metric, ss.Point) + fmt.Println(" ", ss.Metric, ss.T, ss.F) } return fmt.Errorf("expected metric %s with %v not found", ev.metrics[fp], expVals) } @@ -576,15 +576,15 @@ func (t *Test) exec(tc testCommand) error { mat := rangeRes.Value.(Matrix) vec := make(Vector, 0, len(mat)) for _, series := range mat { - for _, point := range series.Points { + for _, point := range series.Floats { if point.T == timeMilliseconds(iq.evalTime) { - vec = append(vec, Sample{Metric: series.Metric, Point: point}) + vec = append(vec, Sample{Metric: series.Metric, T: point.T, F: point.F}) break } } } if _, ok := res.Value.(Scalar); ok { - err = cmd.compareResult(Scalar{V: vec[0].Point.V}) + err = cmd.compareResult(Scalar{V: vec[0].F}) } else { err = cmd.compareResult(vec) } @@ -763,7 +763,7 @@ func (ll *LazyLoader) appendTill(ts int64) error { ll.loadCmd.defs[h] = smpls[i:] break } - if _, err := app.Append(0, m, s.T, s.V); err != nil { + if _, err := app.Append(0, m, s.T, s.F); err != nil { return err } if i == len(smpls)-1 { diff --git a/promql/test_test.go b/promql/test_test.go index 347f66916..cc1df62d0 100644 --- a/promql/test_test.go +++ b/promql/test_test.go @@ -47,8 +47,8 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) { series: []Series{ { Metric: labels.FromStrings("__name__", "metric1"), - Points: []Point{ - {0, 1, nil}, {10000, 2, nil}, {20000, 3, nil}, {30000, 4, nil}, {40000, 5, nil}, + Floats: []FPoint{ + {0, 1}, {10000, 2}, {20000, 3}, {30000, 4}, {40000, 5}, }, }, }, @@ -58,8 +58,8 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) { series: []Series{ { Metric: labels.FromStrings("__name__", "metric1"), - Points: []Point{ - {0, 1, nil}, {10000, 2, nil}, {20000, 3, nil}, {30000, 4, nil}, {40000, 5, nil}, + Floats: []FPoint{ + {0, 1}, {10000, 2}, {20000, 3}, {30000, 4}, {40000, 5}, }, }, }, @@ -69,8 +69,8 
@@ func TestLazyLoader_WithSamplesTill(t *testing.T) { series: []Series{ { Metric: labels.FromStrings("__name__", "metric1"), - Points: []Point{ - {0, 1, nil}, {10000, 2, nil}, {20000, 3, nil}, {30000, 4, nil}, {40000, 5, nil}, {50000, 6, nil}, {60000, 7, nil}, + Floats: []FPoint{ + {0, 1}, {10000, 2}, {20000, 3}, {30000, 4}, {40000, 5}, {50000, 6}, {60000, 7}, }, }, }, @@ -89,14 +89,14 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) { series: []Series{ { Metric: labels.FromStrings("__name__", "metric1"), - Points: []Point{ - {0, 1, nil}, {10000, 1, nil}, {20000, 1, nil}, {30000, 1, nil}, {40000, 1, nil}, {50000, 1, nil}, + Floats: []FPoint{ + {0, 1}, {10000, 1}, {20000, 1}, {30000, 1}, {40000, 1}, {50000, 1}, }, }, { Metric: labels.FromStrings("__name__", "metric2"), - Points: []Point{ - {0, 1, nil}, {10000, 2, nil}, {20000, 3, nil}, {30000, 4, nil}, {40000, 5, nil}, {50000, 6, nil}, {60000, 7, nil}, {70000, 8, nil}, + Floats: []FPoint{ + {0, 1}, {10000, 2}, {20000, 3}, {30000, 4}, {40000, 5}, {50000, 6}, {60000, 7}, {70000, 8}, }, }, }, @@ -146,7 +146,7 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) { it := storageSeries.Iterator(nil) for it.Next() == chunkenc.ValFloat { t, v := it.At() - got.Points = append(got.Points, Point{T: t, V: v}) + got.Floats = append(got.Floats, FPoint{T: t, F: v}) } require.NoError(t, it.Err()) diff --git a/promql/value.go b/promql/value.go index 91904dda2..f59a25112 100644 --- a/promql/value.go +++ b/promql/value.go @@ -17,6 +17,7 @@ import ( "encoding/json" "errors" "fmt" + "math" "strconv" "strings" @@ -64,76 +65,72 @@ func (s Scalar) MarshalJSON() ([]byte, error) { // Series is a stream of data points belonging to a metric. 
type Series struct { - Metric labels.Labels - Points []Point + Metric labels.Labels `json:"metric"` + Floats []FPoint `json:"values,omitempty"` + Histograms []HPoint `json:"histograms,omitempty"` } func (s Series) String() string { - vals := make([]string, len(s.Points)) - for i, v := range s.Points { - vals[i] = v.String() + // TODO(beorn7): This currently renders floats first and then + // histograms, each sorted by timestamp. Maybe, in mixed series, that's + // fine. Maybe, however, primary sorting by timestamp is preferred, in + // which case this has to be changed. + vals := make([]string, 0, len(s.Floats)+len(s.Histograms)) + for _, f := range s.Floats { + vals = append(vals, f.String()) + } + for _, h := range s.Histograms { + vals = append(vals, h.String()) } return fmt.Sprintf("%s =>\n%s", s.Metric, strings.Join(vals, "\n")) } -// MarshalJSON is mirrored in web/api/v1/api.go for efficiency reasons. -// This implementation is still provided for debug purposes and usage -// without jsoniter. -func (s Series) MarshalJSON() ([]byte, error) { - // Note that this is rather inefficient because it re-creates the whole - // series, just separated by Histogram Points and Value Points. For API - // purposes, there is a more efficient jsoniter implementation in - // web/api/v1/api.go. - series := struct { - M labels.Labels `json:"metric"` - V []Point `json:"values,omitempty"` - H []Point `json:"histograms,omitempty"` - }{ - M: s.Metric, - } - for _, p := range s.Points { - if p.H == nil { - series.V = append(series.V, p) - continue - } - series.H = append(series.H, p) - } - return json.Marshal(series) -} - -// Point represents a single data point for a given timestamp. -// If H is not nil, then this is a histogram point and only (T, H) is valid. -// If H is nil, then only (T, V) is valid. -type Point struct { +// FPoint represents a single float data point for a given timestamp. 
+type FPoint struct { T int64 - V float64 - H *histogram.FloatHistogram + F float64 } -func (p Point) String() string { - var s string - if p.H != nil { - s = p.H.String() - } else { - s = strconv.FormatFloat(p.V, 'f', -1, 64) - } +func (p FPoint) String() string { + s := strconv.FormatFloat(p.F, 'f', -1, 64) return fmt.Sprintf("%s @[%v]", s, p.T) } // MarshalJSON implements json.Marshaler. // -// JSON marshaling is only needed for the HTTP API. Since Point is such a +// JSON marshaling is only needed for the HTTP API. Since FPoint is such a // frequently marshaled type, it gets an optimized treatment directly in // web/api/v1/api.go. Therefore, this method is unused within Prometheus. It is // still provided here as convenience for debugging and for other users of this // code. Also note that the different marshaling implementations might lead to // slightly different results in terms of formatting and rounding of the // timestamp. -func (p Point) MarshalJSON() ([]byte, error) { - if p.H == nil { - v := strconv.FormatFloat(p.V, 'f', -1, 64) - return json.Marshal([...]interface{}{float64(p.T) / 1000, v}) - } +func (p FPoint) MarshalJSON() ([]byte, error) { + v := strconv.FormatFloat(p.F, 'f', -1, 64) + return json.Marshal([...]interface{}{float64(p.T) / 1000, v}) +} + +// HPoint represents a single histogram data point for a given timestamp. +// H must never be nil. +type HPoint struct { + T int64 + H *histogram.FloatHistogram +} + +func (p HPoint) String() string { + return fmt.Sprintf("%s @[%v]", p.H.String(), p.T) +} + +// MarshalJSON implements json.Marshaler. +// +// JSON marshaling is only needed for the HTTP API. Since HPoint is such a +// frequently marshaled type, it gets an optimized treatment directly in +// web/api/v1/api.go. Therefore, this method is unused within Prometheus. It is +// still provided here as convenience for debugging and for other users of this +// code. 
Also note that the different marshaling implementations might lead to +// slightly different results in terms of formatting and rounding of the +// timestamp. +func (p HPoint) MarshalJSON() ([]byte, error) { h := struct { Count string `json:"count"` Sum string `json:"sum"` @@ -171,42 +168,54 @@ func (p Point) MarshalJSON() ([]byte, error) { return json.Marshal([...]interface{}{float64(p.T) / 1000, h}) } -// Sample is a single sample belonging to a metric. +// Sample is a single sample belonging to a metric. It represents either a float +// sample or a histogram sample. If H is nil, it is a float sample. Otherwise, +// it is a histogram sample. type Sample struct { - Point + T int64 + F float64 + H *histogram.FloatHistogram Metric labels.Labels } func (s Sample) String() string { - return fmt.Sprintf("%s => %s", s.Metric, s.Point) + var str string + if s.H == nil { + p := FPoint{T: s.T, F: s.F} + str = p.String() + } else { + p := HPoint{T: s.T, H: s.H} + str = p.String() + } + return fmt.Sprintf("%s => %s", s.Metric, str) } -// MarshalJSON is mirrored in web/api/v1/api.go with jsoniter because Point -// wouldn't be marshaled with jsoniter in all cases otherwise. +// MarshalJSON is mirrored in web/api/v1/api.go with jsoniter because FPoint and +// HPoint wouldn't be marshaled with jsoniter otherwise. func (s Sample) MarshalJSON() ([]byte, error) { - if s.Point.H == nil { - v := struct { + if s.H == nil { + f := struct { M labels.Labels `json:"metric"` - V Point `json:"value"` + F FPoint `json:"value"` }{ M: s.Metric, - V: s.Point, + F: FPoint{T: s.T, F: s.F}, } - return json.Marshal(v) + return json.Marshal(f) } h := struct { M labels.Labels `json:"metric"` - H Point `json:"histogram"` + H HPoint `json:"histogram"` }{ M: s.Metric, - H: s.Point, + H: HPoint{T: s.T, H: s.H}, } return json.Marshal(h) } -// Vector is basically only an alias for model.Samples, but the -// contract is that in a Vector, all Samples have the same timestamp. 
+// Vector is basically only an an alias for []Sample, but the contract is that +// in a Vector, all Samples have the same timestamp. type Vector []Sample func (vec Vector) String() string { @@ -258,7 +267,7 @@ func (m Matrix) String() string { func (m Matrix) TotalSamples() int { numSamples := 0 for _, series := range m { - numSamples += len(series.Points) + numSamples += len(series.Floats) + len(series.Histograms) } return numSamples } @@ -362,7 +371,8 @@ func (ss *StorageSeries) Labels() labels.Labels { return ss.series.Metric } -// Iterator returns a new iterator of the data of the series. +// Iterator returns a new iterator of the data of the series. In case of +// multiple samples with the same timestamp, it returns the float samples first. func (ss *StorageSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { if ssi, ok := it.(*storageSeriesIterator); ok { ssi.reset(ss.series) @@ -372,44 +382,51 @@ func (ss *StorageSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { } type storageSeriesIterator struct { - points []Point - curr int + floats []FPoint + histograms []HPoint + iFloats, iHistograms int + currT int64 + currF float64 + currH *histogram.FloatHistogram } func newStorageSeriesIterator(series Series) *storageSeriesIterator { return &storageSeriesIterator{ - points: series.Points, - curr: -1, + floats: series.Floats, + histograms: series.Histograms, + iFloats: -1, + iHistograms: 0, + currT: math.MinInt64, } } func (ssi *storageSeriesIterator) reset(series Series) { - ssi.points = series.Points - ssi.curr = -1 + ssi.floats = series.Floats + ssi.histograms = series.Histograms + ssi.iFloats = -1 + ssi.iHistograms = 0 + ssi.currT = math.MinInt64 + ssi.currF = 0 + ssi.currH = nil } func (ssi *storageSeriesIterator) Seek(t int64) chunkenc.ValueType { - i := ssi.curr - if i < 0 { - i = 0 + if ssi.iFloats >= len(ssi.floats) && ssi.iHistograms >= len(ssi.histograms) { + return chunkenc.ValNone } - for ; i < len(ssi.points); i++ { - p := ssi.points[i] 
- if p.T >= t { - ssi.curr = i - if p.H != nil { - return chunkenc.ValFloatHistogram - } - return chunkenc.ValFloat + for ssi.currT < t { + if ssi.Next() == chunkenc.ValNone { + return chunkenc.ValNone } } - ssi.curr = len(ssi.points) - 1 - return chunkenc.ValNone + if ssi.currH != nil { + return chunkenc.ValFloatHistogram + } + return chunkenc.ValFloat } func (ssi *storageSeriesIterator) At() (t int64, v float64) { - p := ssi.points[ssi.curr] - return p.T, p.V + return ssi.currT, ssi.currF } func (ssi *storageSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { @@ -417,25 +434,59 @@ func (ssi *storageSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { } func (ssi *storageSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { - p := ssi.points[ssi.curr] - return p.T, p.H + return ssi.currT, ssi.currH } func (ssi *storageSeriesIterator) AtT() int64 { - p := ssi.points[ssi.curr] - return p.T + return ssi.currT } func (ssi *storageSeriesIterator) Next() chunkenc.ValueType { - ssi.curr++ - if ssi.curr >= len(ssi.points) { - return chunkenc.ValNone + if ssi.currH != nil { + ssi.iHistograms++ + } else { + ssi.iFloats++ } - p := ssi.points[ssi.curr] - if p.H != nil { + var ( + pickH, pickF = false, false + floatsExhausted = ssi.iFloats >= len(ssi.floats) + histogramsExhausted = ssi.iHistograms >= len(ssi.histograms) + ) + + switch { + case floatsExhausted: + if histogramsExhausted { // Both exhausted! + return chunkenc.ValNone + } + pickH = true + case histogramsExhausted: // and floats not exhausted. + pickF = true + // From here on, we have to look at timestamps. + case ssi.histograms[ssi.iHistograms].T < ssi.floats[ssi.iFloats].T: + // Next histogram comes before next float. + pickH = true + default: + // In all other cases, we pick float so that we first iterate + // through floats if the timestamp is the same. 
+ pickF = true + } + + switch { + case pickF: + p := ssi.floats[ssi.iFloats] + ssi.currT = p.T + ssi.currF = p.F + ssi.currH = nil + return chunkenc.ValFloat + case pickH: + p := ssi.histograms[ssi.iHistograms] + ssi.currT = p.T + ssi.currF = 0 + ssi.currH = p.H return chunkenc.ValFloatHistogram + default: + panic("storageSeriesIterater.Next failed to pick value type") } - return chunkenc.ValFloat } func (ssi *storageSeriesIterator) Err() error { diff --git a/rules/alerting.go b/rules/alerting.go index c793b90cb..aed4e8f53 100644 --- a/rules/alerting.go +++ b/rules/alerting.go @@ -235,7 +235,8 @@ func (r *AlertingRule) sample(alert *Alert, ts time.Time) promql.Sample { s := promql.Sample{ Metric: lb.Labels(), - Point: promql.Point{T: timestamp.FromTime(ts), V: 1}, + T: timestamp.FromTime(ts), + F: 1, } return s } @@ -253,7 +254,8 @@ func (r *AlertingRule) forStateSample(alert *Alert, ts time.Time, v float64) pro s := promql.Sample{ Metric: lb.Labels(), - Point: promql.Point{T: timestamp.FromTime(ts), V: v}, + T: timestamp.FromTime(ts), + F: v, } return s } @@ -339,7 +341,7 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, // Provide the alert information to the template. l := smpl.Metric.Map() - tmplData := template.AlertTemplateData(l, r.externalLabels, r.externalURL, smpl.V) + tmplData := template.AlertTemplateData(l, r.externalLabels, r.externalURL, smpl.F) // Inject some convenience variables that are easier to remember for users // who are not used to Go's templating system. 
defs := []string{ @@ -394,7 +396,7 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, Annotations: annotations, ActiveAt: ts, State: StatePending, - Value: smpl.V, + Value: smpl.F, } } diff --git a/rules/alerting_test.go b/rules/alerting_test.go index d6c0a9a15..13aab9804 100644 --- a/rules/alerting_test.go +++ b/rules/alerting_test.go @@ -109,7 +109,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) { "job", "app-server", "severity", "critical", ), - Point: promql.Point{V: 1}, + F: 1, }, }, { @@ -122,7 +122,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) { "job", "app-server", "severity", "warning", ), - Point: promql.Point{V: 1}, + F: 1, }, }, { @@ -135,7 +135,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) { "job", "app-server", "severity", "critical", ), - Point: promql.Point{V: 1}, + F: 1, }, }, { @@ -148,7 +148,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) { "job", "app-server", "severity", "critical", ), - Point: promql.Point{V: 1}, + F: 1, }, }, } @@ -157,7 +157,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) { for i, result := range results { t.Logf("case %d", i) evalTime := baseTime.Add(time.Duration(i) * time.Minute) - result[0].Point.T = timestamp.FromTime(evalTime) + result[0].T = timestamp.FromTime(evalTime) res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0) require.NoError(t, err) @@ -225,7 +225,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) { "job", "app-server", "templated_label", "There are 0 external Labels, of which foo is .", ), - Point: promql.Point{V: 1}, + F: 1, }, promql.Sample{ Metric: labels.FromStrings( @@ -236,13 +236,13 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) { "job", "app-server", "templated_label", "There are 2 external Labels, of which foo is bar.", ), - Point: promql.Point{V: 1}, + F: 1, }, } evalTime := time.Unix(0, 0) - result[0].Point.T = timestamp.FromTime(evalTime) - 
result[1].Point.T = timestamp.FromTime(evalTime) + result[0].T = timestamp.FromTime(evalTime) + result[1].T = timestamp.FromTime(evalTime) var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. res, err := ruleWithoutExternalLabels.Eval( @@ -321,7 +321,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) { "job", "app-server", "templated_label", "The external URL is .", ), - Point: promql.Point{V: 1}, + F: 1, }, promql.Sample{ Metric: labels.FromStrings( @@ -332,13 +332,13 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) { "job", "app-server", "templated_label", "The external URL is http://localhost:1234.", ), - Point: promql.Point{V: 1}, + F: 1, }, } evalTime := time.Unix(0, 0) - result[0].Point.T = timestamp.FromTime(evalTime) - result[1].Point.T = timestamp.FromTime(evalTime) + result[0].T = timestamp.FromTime(evalTime) + result[1].T = timestamp.FromTime(evalTime) var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. res, err := ruleWithoutExternalURL.Eval( @@ -405,12 +405,12 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) { "instance", "0", "job", "app-server", ), - Point: promql.Point{V: 1}, + F: 1, }, } evalTime := time.Unix(0, 0) - result[0].Point.T = timestamp.FromTime(evalTime) + result[0].T = timestamp.FromTime(evalTime) var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. 
res, err := rule.Eval( @@ -760,7 +760,7 @@ func TestKeepFiringFor(t *testing.T) { "instance", "0", "job", "app-server", ), - Point: promql.Point{V: 1}, + F: 1, }, }, { @@ -772,7 +772,7 @@ func TestKeepFiringFor(t *testing.T) { "instance", "0", "job", "app-server", ), - Point: promql.Point{V: 1}, + F: 1, }, }, { @@ -784,7 +784,7 @@ func TestKeepFiringFor(t *testing.T) { "instance", "0", "job", "app-server", ), - Point: promql.Point{V: 1}, + F: 1, }, }, { @@ -796,7 +796,7 @@ func TestKeepFiringFor(t *testing.T) { "instance", "0", "job", "app-server", ), - Point: promql.Point{V: 1}, + F: 1, }, }, // From now on the alert should keep firing. @@ -809,7 +809,7 @@ func TestKeepFiringFor(t *testing.T) { "instance", "0", "job", "app-server", ), - Point: promql.Point{V: 1}, + F: 1, }, }, } @@ -818,7 +818,7 @@ func TestKeepFiringFor(t *testing.T) { for i, result := range results { t.Logf("case %d", i) evalTime := baseTime.Add(time.Duration(i) * time.Minute) - result[0].Point.T = timestamp.FromTime(evalTime) + result[0].T = timestamp.FromTime(evalTime) res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0) require.NoError(t, err) @@ -871,11 +871,11 @@ func TestPendingAndKeepFiringFor(t *testing.T) { "instance", "0", "job", "app-server", ), - Point: promql.Point{V: 1}, + F: 1, } baseTime := time.Unix(0, 0) - result.Point.T = timestamp.FromTime(baseTime) + result.T = timestamp.FromTime(baseTime) res, err := rule.Eval(suite.Context(), baseTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0) require.NoError(t, err) diff --git a/rules/manager.go b/rules/manager.go index 108b6a77a..07d50be1b 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -202,7 +202,8 @@ func EngineQueryFunc(engine *promql.Engine, q storage.Queryable) QueryFunc { return v, nil case promql.Scalar: return promql.Vector{promql.Sample{ - Point: promql.Point{T: v.T, V: v.V}, + T: v.T, + F: v.V, Metric: labels.Labels{}, }}, nil default: @@ 
-695,7 +696,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { if s.H != nil { _, err = app.AppendHistogram(0, s.Metric, s.T, nil, s.H) } else { - _, err = app.Append(0, s.Metric, s.T, s.V) + _, err = app.Append(0, s.Metric, s.T, s.F) } if err != nil { diff --git a/rules/manager_test.go b/rules/manager_test.go index 1faf730be..440e06c9a 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -80,7 +80,7 @@ func TestAlertingRule(t *testing.T) { "job", "app-server", "severity", "critical", ), - Point: promql.Point{V: 1}, + F: 1, }, promql.Sample{ Metric: labels.FromStrings( @@ -92,7 +92,7 @@ func TestAlertingRule(t *testing.T) { "job", "app-server", "severity", "critical", ), - Point: promql.Point{V: 1}, + F: 1, }, promql.Sample{ Metric: labels.FromStrings( @@ -104,7 +104,7 @@ func TestAlertingRule(t *testing.T) { "job", "app-server", "severity", "critical", ), - Point: promql.Point{V: 1}, + F: 1, }, promql.Sample{ Metric: labels.FromStrings( @@ -116,7 +116,7 @@ func TestAlertingRule(t *testing.T) { "job", "app-server", "severity", "critical", ), - Point: promql.Point{V: 1}, + F: 1, }, } @@ -223,7 +223,7 @@ func TestForStateAddSamples(t *testing.T) { "job", "app-server", "severity", "critical", ), - Point: promql.Point{V: 1}, + F: 1, }, promql.Sample{ Metric: labels.FromStrings( @@ -234,7 +234,7 @@ func TestForStateAddSamples(t *testing.T) { "job", "app-server", "severity", "critical", ), - Point: promql.Point{V: 1}, + F: 1, }, promql.Sample{ Metric: labels.FromStrings( @@ -245,7 +245,7 @@ func TestForStateAddSamples(t *testing.T) { "job", "app-server", "severity", "critical", ), - Point: promql.Point{V: 1}, + F: 1, }, promql.Sample{ Metric: labels.FromStrings( @@ -256,7 +256,7 @@ func TestForStateAddSamples(t *testing.T) { "job", "app-server", "severity", "critical", ), - Point: promql.Point{V: 1}, + F: 1, }, } @@ -327,8 +327,8 @@ func TestForStateAddSamples(t *testing.T) { for i := range test.result { test.result[i].T = 
timestamp.FromTime(evalTime) // Updating the expected 'for' state. - if test.result[i].V >= 0 { - test.result[i].V = forState + if test.result[i].F >= 0 { + test.result[i].F = forState } } require.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res)) @@ -584,29 +584,29 @@ func TestStaleness(t *testing.T) { metricSample, ok := samples[metric] require.True(t, ok, "Series %s not returned.", metric) - require.True(t, value.IsStaleNaN(metricSample[2].V), "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(metricSample[2].V)) - metricSample[2].V = 42 // require.Equal cannot handle NaN. + require.True(t, value.IsStaleNaN(metricSample[2].F), "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(metricSample[2].F)) + metricSample[2].F = 42 // require.Equal cannot handle NaN. - want := map[string][]promql.Point{ - metric: {{T: 0, V: 2}, {T: 1000, V: 3}, {T: 2000, V: 42}}, + want := map[string][]promql.FPoint{ + metric: {{T: 0, F: 2}, {T: 1000, F: 3}, {T: 2000, F: 42}}, } require.Equal(t, want, samples) } // Convert a SeriesSet into a form usable with require.Equal. 
-func readSeriesSet(ss storage.SeriesSet) (map[string][]promql.Point, error) { - result := map[string][]promql.Point{} +func readSeriesSet(ss storage.SeriesSet) (map[string][]promql.FPoint, error) { + result := map[string][]promql.FPoint{} var it chunkenc.Iterator for ss.Next() { series := ss.At() - points := []promql.Point{} + points := []promql.FPoint{} it := series.Iterator(it) for it.Next() == chunkenc.ValFloat { t, v := it.At() - points = append(points, promql.Point{T: t, V: v}) + points = append(points, promql.FPoint{T: t, F: v}) } name := series.Labels().String() @@ -707,7 +707,7 @@ func TestDeletedRuleMarkedStale(t *testing.T) { metricSample, ok := samples[metric] require.True(t, ok, "Series %s not returned.", metric) - require.True(t, value.IsStaleNaN(metricSample[0].V), "Appended sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(metricSample[0].V)) + require.True(t, value.IsStaleNaN(metricSample[0].F), "Appended sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(metricSample[0].F)) } func TestUpdate(t *testing.T) { @@ -1134,7 +1134,7 @@ func countStaleNaN(t *testing.T, st storage.Storage) int { require.True(t, ok, "Series %s not returned.", metric) for _, s := range metricSample { - if value.IsStaleNaN(s.V) { + if value.IsStaleNaN(s.F) { c++ } } diff --git a/rules/recording_test.go b/rules/recording_test.go index 932b880b2..61f47e048 100644 --- a/rules/recording_test.go +++ b/rules/recording_test.go @@ -46,11 +46,13 @@ var ruleEvalTestScenarios = []struct { expected: promql.Vector{ promql.Sample{ Metric: labels.FromStrings("__name__", "test_rule", "label_a", "1", "label_b", "3"), - Point: promql.Point{V: 1, T: timestamp.FromTime(ruleEvaluationTime)}, + F: 1, + T: timestamp.FromTime(ruleEvaluationTime), }, promql.Sample{ Metric: labels.FromStrings("__name__", "test_rule", "label_a", "2", "label_b", "4"), - Point: promql.Point{V: 10, T: timestamp.FromTime(ruleEvaluationTime)}, + F: 10, + T: 
timestamp.FromTime(ruleEvaluationTime), }, }, }, @@ -61,11 +63,13 @@ var ruleEvalTestScenarios = []struct { expected: promql.Vector{ promql.Sample{ Metric: labels.FromStrings("__name__", "test_rule", "label_a", "1", "label_b", "3", "extra_from_rule", "foo"), - Point: promql.Point{V: 1, T: timestamp.FromTime(ruleEvaluationTime)}, + F: 1, + T: timestamp.FromTime(ruleEvaluationTime), }, promql.Sample{ Metric: labels.FromStrings("__name__", "test_rule", "label_a", "2", "label_b", "4", "extra_from_rule", "foo"), - Point: promql.Point{V: 10, T: timestamp.FromTime(ruleEvaluationTime)}, + F: 10, + T: timestamp.FromTime(ruleEvaluationTime), }, }, }, @@ -76,11 +80,13 @@ var ruleEvalTestScenarios = []struct { expected: promql.Vector{ promql.Sample{ Metric: labels.FromStrings("__name__", "test_rule", "label_a", "from_rule", "label_b", "3"), - Point: promql.Point{V: 1, T: timestamp.FromTime(ruleEvaluationTime)}, + F: 1, + T: timestamp.FromTime(ruleEvaluationTime), }, promql.Sample{ Metric: labels.FromStrings("__name__", "test_rule", "label_a", "from_rule", "label_b", "4"), - Point: promql.Point{V: 10, T: timestamp.FromTime(ruleEvaluationTime)}, + F: 10, + T: timestamp.FromTime(ruleEvaluationTime), }, }, }, @@ -91,11 +97,13 @@ var ruleEvalTestScenarios = []struct { expected: promql.Vector{ promql.Sample{ Metric: labels.FromStrings("__name__", "test_rule", "label_a", "1", "label_b", "3"), - Point: promql.Point{V: 2, T: timestamp.FromTime(ruleEvaluationTime)}, + F: 2, + T: timestamp.FromTime(ruleEvaluationTime), }, promql.Sample{ Metric: labels.FromStrings("__name__", "test_rule", "label_a", "2", "label_b", "4"), - Point: promql.Point{V: 20, T: timestamp.FromTime(ruleEvaluationTime)}, + F: 20, + T: timestamp.FromTime(ruleEvaluationTime), }, }, }, diff --git a/template/template.go b/template/template.go index d5f80006f..d61a880a2 100644 --- a/template/template.go +++ b/template/template.go @@ -93,7 +93,7 @@ func query(ctx context.Context, q string, ts time.Time, queryFn QueryFunc) 
(quer result := make(queryResult, len(vector)) for n, v := range vector { s := sample{ - Value: v.V, + Value: v.F, Labels: v.Metric.Map(), } result[n] = &s diff --git a/template/template_test.go b/template/template_test.go index 4d7524d6f..e7bdcc3b8 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -70,7 +70,7 @@ func TestTemplateExpansion(t *testing.T) { { text: "{{ query \"1.5\" | first | value }}", output: "1.5", - queryResult: promql.Vector{{Point: promql.Point{T: 0, V: 1.5}}}, + queryResult: promql.Vector{{T: 0, F: 1.5}}, }, { // Get value from query. @@ -78,7 +78,8 @@ func TestTemplateExpansion(t *testing.T) { queryResult: promql.Vector{ { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), - Point: promql.Point{T: 0, V: 11}, + T: 0, + F: 11, }, }, output: "11", @@ -90,7 +91,8 @@ func TestTemplateExpansion(t *testing.T) { queryResult: promql.Vector{ { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), - Point: promql.Point{T: 0, V: 11}, + T: 0, + F: 11, }, }, output: "a", @@ -101,7 +103,8 @@ func TestTemplateExpansion(t *testing.T) { queryResult: promql.Vector{ { Metric: labels.FromStrings(labels.MetricName, "metric", "__value__", "a"), - Point: promql.Point{T: 0, V: 11}, + T: 0, + F: 11, }, }, output: "a", @@ -112,7 +115,8 @@ func TestTemplateExpansion(t *testing.T) { queryResult: promql.Vector{ { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), - Point: promql.Point{T: 0, V: 11}, + T: 0, + F: 11, }, }, output: "", @@ -123,7 +127,8 @@ func TestTemplateExpansion(t *testing.T) { queryResult: promql.Vector{ { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), - Point: promql.Point{T: 0, V: 11}, + T: 0, + F: 11, }, }, output: "", @@ -133,7 +138,8 @@ func TestTemplateExpansion(t *testing.T) { queryResult: promql.Vector{ { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), - Point: promql.Point{T: 0, V: 11}, + T: 0, + F: 11, 
}, }, output: "", @@ -145,10 +151,12 @@ func TestTemplateExpansion(t *testing.T) { queryResult: promql.Vector{ { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "b"), - Point: promql.Point{T: 0, V: 21}, + T: 0, + F: 21, }, { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), - Point: promql.Point{T: 0, V: 11}, + T: 0, + F: 11, }, }, output: "a:11: b:21: ", diff --git a/util/jsonutil/marshal.go b/util/jsonutil/marshal.go index a82ae100d..4400385d0 100644 --- a/util/jsonutil/marshal.go +++ b/util/jsonutil/marshal.go @@ -18,6 +18,8 @@ import ( "strconv" jsoniter "github.com/json-iterator/go" + + "github.com/prometheus/prometheus/model/histogram" ) // MarshalTimestamp marshals a point timestamp using the passed jsoniter stream. @@ -42,8 +44,8 @@ func MarshalTimestamp(t int64, stream *jsoniter.Stream) { } } -// MarshalValue marshals a point value using the passed jsoniter stream. -func MarshalValue(v float64, stream *jsoniter.Stream) { +// MarshalFloat marshals a float value using the passed jsoniter stream. +func MarshalFloat(v float64, stream *jsoniter.Stream) { stream.WriteRaw(`"`) // Taken from https://github.com/json-iterator/go/blob/master/stream_float.go#L71 as a workaround // to https://github.com/json-iterator/go/issues/365 (jsoniter, to follow json standard, doesn't allow inf/nan). @@ -60,3 +62,76 @@ func MarshalValue(v float64, stream *jsoniter.Stream) { stream.SetBuffer(buf) stream.WriteRaw(`"`) } + +// MarshalHistogram marshals a histogram value using the passed jsoniter stream. 
+// It writes something like: +// +// { +// "count": "42", +// "sum": "34593.34", +// "buckets": [ +// [ 3, "-0.25", "0.25", "3"], +// [ 0, "0.25", "0.5", "12"], +// [ 0, "0.5", "1", "21"], +// [ 0, "2", "4", "6"] +// ] +// } +// +// The 1st element in each bucket array determines if the boundaries are +// inclusive (AKA closed) or exclusive (AKA open): +// +// 0: lower exclusive, upper inclusive +// 1: lower inclusive, upper exclusive +// 2: both exclusive +// 3: both inclusive +// +// The 2nd and 3rd elements are the lower and upper boundary. The 4th element is +// the bucket count. +func MarshalHistogram(h *histogram.FloatHistogram, stream *jsoniter.Stream) { + stream.WriteObjectStart() + stream.WriteObjectField(`count`) + MarshalFloat(h.Count, stream) + stream.WriteMore() + stream.WriteObjectField(`sum`) + MarshalFloat(h.Sum, stream) + + bucketFound := false + it := h.AllBucketIterator() + for it.Next() { + bucket := it.At() + if bucket.Count == 0 { + continue // No need to expose empty buckets in JSON. + } + stream.WriteMore() + if !bucketFound { + stream.WriteObjectField(`buckets`) + stream.WriteArrayStart() + } + bucketFound = true + boundaries := 2 // Exclusive on both sides AKA open interval. + if bucket.LowerInclusive { + if bucket.UpperInclusive { + boundaries = 3 // Inclusive on both sides AKA closed interval. + } else { + boundaries = 1 // Inclusive only on lower end AKA right open. + } + } else { + if bucket.UpperInclusive { + boundaries = 0 // Inclusive only on upper end AKA left open. 
+ } + } + stream.WriteArrayStart() + stream.WriteInt(boundaries) + stream.WriteMore() + MarshalFloat(bucket.Lower, stream) + stream.WriteMore() + MarshalFloat(bucket.Upper, stream) + stream.WriteMore() + MarshalFloat(bucket.Count, stream) + stream.WriteArrayEnd() + } + if bucketFound { + stream.WriteArrayEnd() + } + stream.WriteObjectEnd() +} diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 3b6ade562..aeea87ca7 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -41,7 +41,6 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" - "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/model/timestamp" @@ -217,7 +216,8 @@ type API struct { func init() { jsoniter.RegisterTypeEncoderFunc("promql.Series", marshalSeriesJSON, marshalSeriesJSONIsEmpty) jsoniter.RegisterTypeEncoderFunc("promql.Sample", marshalSampleJSON, marshalSampleJSONIsEmpty) - jsoniter.RegisterTypeEncoderFunc("promql.Point", marshalPointJSON, marshalPointJSONIsEmpty) + jsoniter.RegisterTypeEncoderFunc("promql.FPoint", marshalFPointJSON, marshalPointJSONIsEmpty) + jsoniter.RegisterTypeEncoderFunc("promql.HPoint", marshalHPointJSON, marshalPointJSONIsEmpty) jsoniter.RegisterTypeEncoderFunc("exemplar.Exemplar", marshalExemplarJSON, marshalExemplarJSONEmpty) } @@ -1724,7 +1724,7 @@ OUTER: // < more values> // ], // "histograms": [ -// [ 1435781451.781, { < histogram, see below > } ], +// [ 1435781451.781, { < histogram, see jsonutil.MarshalHistogram > } ], // < more histograms > // ], // }, @@ -1739,41 +1739,26 @@ func marshalSeriesJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { } stream.SetBuffer(append(stream.Buffer(), m...)) - // We make two passes through the series here: In the first marshaling - // all value points, in the second marshaling all histogram - // points. 
That's probably cheaper than just one pass in which we copy - // out histogram Points into a newly allocated slice for separate - // marshaling. (Could be benchmarked, though.) - var foundValue, foundHistogram bool - for _, p := range s.Points { - if p.H == nil { - stream.WriteMore() - if !foundValue { - stream.WriteObjectField(`values`) - stream.WriteArrayStart() - } - foundValue = true - marshalPointJSON(unsafe.Pointer(&p), stream) - } else { - foundHistogram = true + for i, p := range s.Floats { + stream.WriteMore() + if i == 0 { + stream.WriteObjectField(`values`) + stream.WriteArrayStart() } + marshalFPointJSON(unsafe.Pointer(&p), stream) } - if foundValue { + if len(s.Floats) > 0 { stream.WriteArrayEnd() } - if foundHistogram { - firstHistogram := true - for _, p := range s.Points { - if p.H != nil { - stream.WriteMore() - if firstHistogram { - stream.WriteObjectField(`histograms`) - stream.WriteArrayStart() - } - firstHistogram = false - marshalPointJSON(unsafe.Pointer(&p), stream) - } + for i, p := range s.Histograms { + stream.WriteMore() + if i == 0 { + stream.WriteObjectField(`histograms`) + stream.WriteArrayStart() } + marshalHPointJSON(unsafe.Pointer(&p), stream) + } + if len(s.Histograms) > 0 { stream.WriteArrayEnd() } stream.WriteObjectEnd() @@ -1791,7 +1776,7 @@ func marshalSeriesJSONIsEmpty(ptr unsafe.Pointer) bool { // "job" : "prometheus", // "instance" : "localhost:9090" // }, -// "value": [ 1435781451.781, "1" ] +// "value": [ 1435781451.781, "1.234" ] // }, // // For histogram samples, it writes something like this: @@ -1802,7 +1787,7 @@ func marshalSeriesJSONIsEmpty(ptr unsafe.Pointer) bool { // "job" : "prometheus", // "instance" : "localhost:9090" // }, -// "histogram": [ 1435781451.781, { < histogram, see below > } ] +// "histogram": [ 1435781451.781, { < histogram, see jsonutil.MarshalHistogram > } ] // }, func marshalSampleJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { s := *((*promql.Sample)(ptr)) @@ -1815,12 +1800,20 @@ func 
marshalSampleJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { } stream.SetBuffer(append(stream.Buffer(), m...)) stream.WriteMore() - if s.Point.H == nil { + if s.H == nil { stream.WriteObjectField(`value`) } else { stream.WriteObjectField(`histogram`) } - marshalPointJSON(unsafe.Pointer(&s.Point), stream) + stream.WriteArrayStart() + jsonutil.MarshalTimestamp(s.T, stream) + stream.WriteMore() + if s.H == nil { + jsonutil.MarshalFloat(s.F, stream) + } else { + jsonutil.MarshalHistogram(s.H, stream) + } + stream.WriteArrayEnd() stream.WriteObjectEnd() } @@ -1828,17 +1821,23 @@ func marshalSampleJSONIsEmpty(ptr unsafe.Pointer) bool { return false } -// marshalPointJSON writes `[ts, "val"]`. -func marshalPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { - p := *((*promql.Point)(ptr)) +// marshalFPointJSON writes `[ts, "1.234"]`. +func marshalFPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { + p := *((*promql.FPoint)(ptr)) stream.WriteArrayStart() jsonutil.MarshalTimestamp(p.T, stream) stream.WriteMore() - if p.H == nil { - jsonutil.MarshalValue(p.V, stream) - } else { - marshalHistogram(p.H, stream) - } + jsonutil.MarshalFloat(p.F, stream) + stream.WriteArrayEnd() +} + +// marshalHPointJSON writes `[ts, { < histogram, see jsonutil.MarshalHistogram > } ]`. 
+func marshalHPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { + p := *((*promql.HPoint)(ptr)) + stream.WriteArrayStart() + jsonutil.MarshalTimestamp(p.T, stream) + stream.WriteMore() + jsonutil.MarshalHistogram(p.H, stream) stream.WriteArrayEnd() } @@ -1846,78 +1845,6 @@ func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool { return false } -// marshalHistogramJSON writes something like: -// -// { -// "count": "42", -// "sum": "34593.34", -// "buckets": [ -// [ 3, "-0.25", "0.25", "3"], -// [ 0, "0.25", "0.5", "12"], -// [ 0, "0.5", "1", "21"], -// [ 0, "2", "4", "6"] -// ] -// } -// -// The 1st element in each bucket array determines if the boundaries are -// inclusive (AKA closed) or exclusive (AKA open): -// -// 0: lower exclusive, upper inclusive -// 1: lower inclusive, upper exclusive -// 2: both exclusive -// 3: both inclusive -// -// The 2nd and 3rd elements are the lower and upper boundary. The 4th element is -// the bucket count. -func marshalHistogram(h *histogram.FloatHistogram, stream *jsoniter.Stream) { - stream.WriteObjectStart() - stream.WriteObjectField(`count`) - jsonutil.MarshalValue(h.Count, stream) - stream.WriteMore() - stream.WriteObjectField(`sum`) - jsonutil.MarshalValue(h.Sum, stream) - - bucketFound := false - it := h.AllBucketIterator() - for it.Next() { - bucket := it.At() - if bucket.Count == 0 { - continue // No need to expose empty buckets in JSON. - } - stream.WriteMore() - if !bucketFound { - stream.WriteObjectField(`buckets`) - stream.WriteArrayStart() - } - bucketFound = true - boundaries := 2 // Exclusive on both sides AKA open interval. - if bucket.LowerInclusive { - if bucket.UpperInclusive { - boundaries = 3 // Inclusive on both sides AKA closed interval. - } else { - boundaries = 1 // Inclusive only on lower end AKA right open. - } - } else { - if bucket.UpperInclusive { - boundaries = 0 // Inclusive only on upper end AKA left open. 
- } - } - stream.WriteArrayStart() - stream.WriteInt(boundaries) - stream.WriteMore() - jsonutil.MarshalValue(bucket.Lower, stream) - stream.WriteMore() - jsonutil.MarshalValue(bucket.Upper, stream) - stream.WriteMore() - jsonutil.MarshalValue(bucket.Count, stream) - stream.WriteArrayEnd() - } - if bucketFound { - stream.WriteArrayEnd() - } - stream.WriteObjectEnd() -} - // marshalExemplarJSON writes. // // { @@ -1941,7 +1868,7 @@ func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { // "value" key. stream.WriteMore() stream.WriteObjectField(`value`) - jsonutil.MarshalValue(p.Value, stream) + jsonutil.MarshalFloat(p.Value, stream) // "timestamp" key. stream.WriteMore() diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 4a1e4d67d..efce04221 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -1102,10 +1102,10 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E ResultType: parser.ValueTypeMatrix, Result: promql.Matrix{ promql.Series{ - Points: []promql.Point{ - {V: 0, T: timestamp.FromTime(start)}, - {V: 1, T: timestamp.FromTime(start.Add(1 * time.Second))}, - {V: 2, T: timestamp.FromTime(start.Add(2 * time.Second))}, + Floats: []promql.FPoint{ + {F: 0, T: timestamp.FromTime(start)}, + {F: 1, T: timestamp.FromTime(start.Add(1 * time.Second))}, + {F: 2, T: timestamp.FromTime(start.Add(2 * time.Second))}, }, // No Metric returned - use zero value for comparison. 
}, @@ -3059,7 +3059,7 @@ func TestRespond(t *testing.T) { ResultType: parser.ValueTypeMatrix, Result: promql.Matrix{ promql.Series{ - Points: []promql.Point{{V: 1, T: 1000}}, + Floats: []promql.FPoint{{F: 1, T: 1000}}, Metric: labels.FromStrings("__name__", "foo"), }, }, @@ -3071,7 +3071,7 @@ func TestRespond(t *testing.T) { ResultType: parser.ValueTypeMatrix, Result: promql.Matrix{ promql.Series{ - Points: []promql.Point{{H: &histogram.FloatHistogram{ + Histograms: []promql.HPoint{{H: &histogram.FloatHistogram{ Schema: 2, ZeroThreshold: 0.001, ZeroCount: 12, @@ -3094,63 +3094,63 @@ func TestRespond(t *testing.T) { expected: `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"foo"},"histograms":[[1,{"count":"10","sum":"20","buckets":[[1,"-1.6817928305074288","-1.414213562373095","1"],[1,"-1.414213562373095","-1.189207115002721","2"],[3,"-0.001","0.001","12"],[0,"1.414213562373095","1.6817928305074288","1"],[0,"1.6817928305074288","2","2"],[0,"2.378414230005442","2.82842712474619","2"],[0,"2.82842712474619","3.3635856610148576","1"],[0,"3.3635856610148576","4","1"]]}]]}]}}`, }, { - response: promql.Point{V: 0, T: 0}, + response: promql.FPoint{F: 0, T: 0}, expected: `{"status":"success","data":[0,"0"]}`, }, { - response: promql.Point{V: 20, T: 1}, + response: promql.FPoint{F: 20, T: 1}, expected: `{"status":"success","data":[0.001,"20"]}`, }, { - response: promql.Point{V: 20, T: 10}, + response: promql.FPoint{F: 20, T: 10}, expected: `{"status":"success","data":[0.010,"20"]}`, }, { - response: promql.Point{V: 20, T: 100}, + response: promql.FPoint{F: 20, T: 100}, expected: `{"status":"success","data":[0.100,"20"]}`, }, { - response: promql.Point{V: 20, T: 1001}, + response: promql.FPoint{F: 20, T: 1001}, expected: `{"status":"success","data":[1.001,"20"]}`, }, { - response: promql.Point{V: 20, T: 1010}, + response: promql.FPoint{F: 20, T: 1010}, expected: `{"status":"success","data":[1.010,"20"]}`, }, { - response: promql.Point{V: 20, 
T: 1100}, + response: promql.FPoint{F: 20, T: 1100}, expected: `{"status":"success","data":[1.100,"20"]}`, }, { - response: promql.Point{V: 20, T: 12345678123456555}, + response: promql.FPoint{F: 20, T: 12345678123456555}, expected: `{"status":"success","data":[12345678123456.555,"20"]}`, }, { - response: promql.Point{V: 20, T: -1}, + response: promql.FPoint{F: 20, T: -1}, expected: `{"status":"success","data":[-0.001,"20"]}`, }, { - response: promql.Point{V: math.NaN(), T: 0}, + response: promql.FPoint{F: math.NaN(), T: 0}, expected: `{"status":"success","data":[0,"NaN"]}`, }, { - response: promql.Point{V: math.Inf(1), T: 0}, + response: promql.FPoint{F: math.Inf(1), T: 0}, expected: `{"status":"success","data":[0,"+Inf"]}`, }, { - response: promql.Point{V: math.Inf(-1), T: 0}, + response: promql.FPoint{F: math.Inf(-1), T: 0}, expected: `{"status":"success","data":[0,"-Inf"]}`, }, { - response: promql.Point{V: 1.2345678e6, T: 0}, + response: promql.FPoint{F: 1.2345678e6, T: 0}, expected: `{"status":"success","data":[0,"1234567.8"]}`, }, { - response: promql.Point{V: 1.2345678e-6, T: 0}, + response: promql.FPoint{F: 1.2345678e-6, T: 0}, expected: `{"status":"success","data":[0,"0.0000012345678"]}`, }, { - response: promql.Point{V: 1.2345678e-67, T: 0}, + response: promql.FPoint{F: 1.2345678e-67, T: 0}, expected: `{"status":"success","data":[0,"1.2345678e-67"]}`, }, { @@ -3283,15 +3283,15 @@ var testResponseWriter = httptest.ResponseRecorder{} func BenchmarkRespond(b *testing.B) { b.ReportAllocs() - points := []promql.Point{} + points := []promql.FPoint{} for i := 0; i < 10000; i++ { - points = append(points, promql.Point{V: float64(i * 1000000), T: int64(i)}) + points = append(points, promql.FPoint{F: float64(i * 1000000), T: int64(i)}) } response := &queryData{ ResultType: parser.ValueTypeMatrix, Result: promql.Matrix{ promql.Series{ - Points: points, + Floats: points, Metric: labels.EmptyLabels(), }, }, diff --git a/web/federate.go b/web/federate.go index 
93526771b..ad2574e80 100644 --- a/web/federate.go +++ b/web/federate.go @@ -145,7 +145,9 @@ Loop: vec = append(vec, promql.Sample{ Metric: s.Labels(), - Point: promql.Point{T: t, V: v, H: fh}, + T: t, + F: v, + H: fh, }) } if ws := set.Warnings(); len(ws) > 0 { @@ -262,7 +264,7 @@ Loop: if !isHistogram { lastHistogramWasGauge = false protMetric.Untyped = &dto.Untyped{ - Value: proto.Float64(s.V), + Value: proto.Float64(s.F), } } else { lastHistogramWasGauge = s.H.CounterResetHint == histogram.GaugeType diff --git a/web/federate_test.go b/web/federate_test.go index 944b0f1ec..76d4b9cf6 100644 --- a/web/federate_test.go +++ b/web/federate_test.go @@ -342,14 +342,16 @@ func TestFederationWithNativeHistograms(t *testing.T) { if i%3 == 0 { _, err = app.Append(0, l, 100*60*1000, float64(i*100)) expVec = append(expVec, promql.Sample{ - Point: promql.Point{T: 100 * 60 * 1000, V: float64(i * 100)}, + T: 100 * 60 * 1000, + F: float64(i * 100), Metric: expL, }) } else { hist.ZeroCount++ _, err = app.AppendHistogram(0, l, 100*60*1000, hist.Copy(), nil) expVec = append(expVec, promql.Sample{ - Point: promql.Point{T: 100 * 60 * 1000, H: hist.ToFloat()}, + T: 100 * 60 * 1000, + H: hist.ToFloat(), Metric: expL, }) } @@ -379,6 +381,7 @@ func TestFederationWithNativeHistograms(t *testing.T) { p := textparse.NewProtobufParser(body) var actVec promql.Vector metricFamilies := 0 + l := labels.Labels{} for { et, err := p.Next() if err == io.EOF { @@ -389,23 +392,23 @@ func TestFederationWithNativeHistograms(t *testing.T) { metricFamilies++ } if et == textparse.EntryHistogram || et == textparse.EntrySeries { - l := labels.Labels{} p.Metric(&l) - actVec = append(actVec, promql.Sample{Metric: l}) } if et == textparse.EntryHistogram { _, parsedTimestamp, h, fh := p.Histogram() require.Nil(t, h) - actVec[len(actVec)-1].Point = promql.Point{ - T: *parsedTimestamp, - H: fh, - } + actVec = append(actVec, promql.Sample{ + T: *parsedTimestamp, + H: fh, + Metric: l, + }) } else if et == 
textparse.EntrySeries { - _, parsedTimestamp, v := p.Series() - actVec[len(actVec)-1].Point = promql.Point{ - T: *parsedTimestamp, - V: v, - } + _, parsedTimestamp, f := p.Series() + actVec = append(actVec, promql.Sample{ + T: *parsedTimestamp, + F: f, + Metric: l, + }) } } From 630bcb494b1828e07a1b9744d5c9d938e42ee908 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 8 Dec 2022 13:31:08 +0100 Subject: [PATCH 069/632] storage: Use separate sample types for histogram vs. float MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, we had one “polymorphous” `sample` type in the `storage` package. This commit breaks it up into `fSample`, `hSample`, and `fhSample`, each still implementing the `tsdbutil.Sample` interface. This reduces allocations in `sampleRing.Add` but inflicts the penalty of the interface wrapper, which makes things worse in total. This commit therefore just demonstrates the step taken. The next commit will tackle the interface overhead problem. Signed-off-by: beorn7 --- storage/buffer.go | 142 +++++++----- storage/buffer_test.go | 46 ++-- storage/memoized_iterator_test.go | 16 +- storage/merge_test.go | 350 +++++++++++++++--------------- storage/series.go | 21 +- storage/series_test.go | 32 +-- tsdb/tsdbutil/chunks.go | 2 +- web/federate.go | 24 +- 8 files changed, 342 insertions(+), 291 deletions(-) diff --git a/storage/buffer.go b/storage/buffer.go index 92767cdd7..68210facc 100644 --- a/storage/buffer.go +++ b/storage/buffer.go @@ -19,6 +19,7 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/tsdbutil" ) // BufferedSeriesIterator wraps an iterator with a look-back buffer. @@ -68,11 +69,8 @@ func (b *BufferedSeriesIterator) ReduceDelta(delta int64) bool { // PeekBack returns the nth previous element of the iterator. If there is none buffered, // ok is false. 
-func (b *BufferedSeriesIterator) PeekBack(n int) ( - t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, ok bool, -) { - s, ok := b.buf.nthLast(n) - return s.t, s.v, s.h, s.fh, ok +func (b *BufferedSeriesIterator) PeekBack(n int) (sample tsdbutil.Sample, ok bool) { + return b.buf.nthLast(n) } // Buffer returns an iterator over the buffered data. Invalidates previously @@ -122,14 +120,14 @@ func (b *BufferedSeriesIterator) Next() chunkenc.ValueType { case chunkenc.ValNone: return chunkenc.ValNone case chunkenc.ValFloat: - t, v := b.it.At() - b.buf.add(sample{t: t, v: v}) + t, f := b.it.At() + b.buf.add(fSample{t: t, f: f}) case chunkenc.ValHistogram: t, h := b.it.AtHistogram() - b.buf.add(sample{t: t, h: h}) + b.buf.add(hSample{t: t, h: h}) case chunkenc.ValFloatHistogram: t, fh := b.it.AtFloatHistogram() - b.buf.add(sample{t: t, fh: fh}) + b.buf.add(fhSample{t: t, fh: fh}) default: panic(fmt.Errorf("BufferedSeriesIterator: unknown value type %v", b.valueType)) } @@ -166,54 +164,94 @@ func (b *BufferedSeriesIterator) Err() error { return b.it.Err() } -// TODO(beorn7): Consider having different sample types for different value types. 
-type sample struct { - t int64 - v float64 - h *histogram.Histogram - fh *histogram.FloatHistogram +type fSample struct { + t int64 + f float64 } -func (s sample) T() int64 { +func (s fSample) T() int64 { return s.t } -func (s sample) V() float64 { - return s.v +func (s fSample) V() float64 { + return s.f } -func (s sample) H() *histogram.Histogram { +func (s fSample) H() *histogram.Histogram { + panic("H() called for fSample") +} + +func (s fSample) FH() *histogram.FloatHistogram { + panic("FH() called for fSample") +} + +func (s fSample) Type() chunkenc.ValueType { + return chunkenc.ValFloat +} + +type hSample struct { + t int64 + h *histogram.Histogram +} + +func (s hSample) T() int64 { + return s.t +} + +func (s hSample) V() float64 { + panic("F() called for hSample") +} + +func (s hSample) H() *histogram.Histogram { return s.h } -func (s sample) FH() *histogram.FloatHistogram { +func (s hSample) FH() *histogram.FloatHistogram { + return s.h.ToFloat() +} + +func (s hSample) Type() chunkenc.ValueType { + return chunkenc.ValHistogram +} + +type fhSample struct { + t int64 + fh *histogram.FloatHistogram +} + +func (s fhSample) T() int64 { + return s.t +} + +func (s fhSample) V() float64 { + panic("F() called for fhSample") +} + +func (s fhSample) H() *histogram.Histogram { + panic("H() called for fhSample") +} + +func (s fhSample) FH() *histogram.FloatHistogram { return s.fh } -func (s sample) Type() chunkenc.ValueType { - switch { - case s.h != nil: - return chunkenc.ValHistogram - case s.fh != nil: - return chunkenc.ValFloatHistogram - default: - return chunkenc.ValFloat - } +func (s fhSample) Type() chunkenc.ValueType { + return chunkenc.ValFloatHistogram } type sampleRing struct { delta int64 - buf []sample // lookback buffer - i int // position of most recent element in ring buffer - f int // position of first element in ring buffer - l int // number of elements in buffer + buf []tsdbutil.Sample // lookback buffer + i int // position of most recent element 
in ring buffer + f int // position of first element in ring buffer + l int // number of elements in buffer it sampleRingIterator } func newSampleRing(delta int64, sz int) *sampleRing { - r := &sampleRing{delta: delta, buf: make([]sample, sz)} + r := &sampleRing{delta: delta, buf: make([]tsdbutil.Sample, sz)} r.reset() return r @@ -247,16 +285,16 @@ func (it *sampleRingIterator) Next() chunkenc.ValueType { return chunkenc.ValNone } s := it.r.at(it.i) - it.t = s.t - switch { - case s.h != nil: - it.h = s.h + it.t = s.T() + switch s.Type() { + case chunkenc.ValHistogram: + it.h = s.H() return chunkenc.ValHistogram - case s.fh != nil: - it.fh = s.fh + case chunkenc.ValFloatHistogram: + it.fh = s.FH() return chunkenc.ValFloatHistogram default: - it.v = s.v + it.v = s.V() return chunkenc.ValFloat } } @@ -288,18 +326,18 @@ func (it *sampleRingIterator) AtT() int64 { return it.t } -func (r *sampleRing) at(i int) sample { +func (r *sampleRing) at(i int) tsdbutil.Sample { j := (r.f + i) % len(r.buf) return r.buf[j] } // add adds a sample to the ring buffer and frees all samples that fall // out of the delta range. -func (r *sampleRing) add(s sample) { +func (r *sampleRing) add(s tsdbutil.Sample) { l := len(r.buf) // Grow the ring buffer if it fits no more elements. if l == r.l { - buf := make([]sample, 2*l) + buf := make([]tsdbutil.Sample, 2*l) copy(buf[l+r.f:], r.buf[r.f:]) copy(buf, r.buf[:r.f]) @@ -318,8 +356,8 @@ func (r *sampleRing) add(s sample) { r.l++ // Free head of the buffer of samples that just fell out of the range. - tmin := s.t - r.delta - for r.buf[r.f].t < tmin { + tmin := s.T() - r.delta + for r.buf[r.f].T() < tmin { r.f++ if r.f >= l { r.f -= l @@ -342,8 +380,8 @@ func (r *sampleRing) reduceDelta(delta int64) bool { // Free head of the buffer of samples that just fell out of the range. 
l := len(r.buf) - tmin := r.buf[r.i].t - delta - for r.buf[r.f].t < tmin { + tmin := r.buf[r.i].T() - delta + for r.buf[r.f].T() < tmin { r.f++ if r.f >= l { r.f -= l @@ -354,15 +392,15 @@ func (r *sampleRing) reduceDelta(delta int64) bool { } // nthLast returns the nth most recent element added to the ring. -func (r *sampleRing) nthLast(n int) (sample, bool) { +func (r *sampleRing) nthLast(n int) (tsdbutil.Sample, bool) { if n > r.l { - return sample{}, false + return fSample{}, false } return r.at(r.l - n), true } -func (r *sampleRing) samples() []sample { - res := make([]sample, r.l) +func (r *sampleRing) samples() []tsdbutil.Sample { + res := make([]tsdbutil.Sample, r.l) k := r.f + r.l var j int diff --git a/storage/buffer_test.go b/storage/buffer_test.go index 44d11f0ed..7cfda4d4c 100644 --- a/storage/buffer_test.go +++ b/storage/buffer_test.go @@ -58,11 +58,11 @@ func TestSampleRing(t *testing.T) { for _, c := range cases { r := newSampleRing(c.delta, c.size) - input := []sample{} + input := []fSample{} for _, t := range c.input { - input = append(input, sample{ + input = append(input, fSample{ t: t, - v: float64(rand.Intn(100)), + f: float64(rand.Intn(100)), }) } @@ -73,7 +73,7 @@ func TestSampleRing(t *testing.T) { for _, sold := range input[:i] { found := false for _, bs := range buffered { - if bs.t == sold.t && bs.v == sold.v { + if bs.T() == sold.t && bs.V() == sold.f { found = true break } @@ -92,12 +92,12 @@ func TestSampleRing(t *testing.T) { func TestBufferedSeriesIterator(t *testing.T) { var it *BufferedSeriesIterator - bufferEq := func(exp []sample) { - var b []sample + bufferEq := func(exp []fSample) { + var b []fSample bit := it.Buffer() for bit.Next() == chunkenc.ValFloat { - t, v := bit.At() - b = append(b, sample{t: t, v: v}) + t, f := bit.At() + b = append(b, fSample{t: t, f: f}) } require.Equal(t, exp, b, "buffer mismatch") } @@ -107,21 +107,21 @@ func TestBufferedSeriesIterator(t *testing.T) { require.Equal(t, ev, v, "value mismatch") } 
prevSampleEq := func(ets int64, ev float64, eok bool) { - ts, v, _, _, ok := it.PeekBack(1) + s, ok := it.PeekBack(1) require.Equal(t, eok, ok, "exist mismatch") - require.Equal(t, ets, ts, "timestamp mismatch") - require.Equal(t, ev, v, "value mismatch") + require.Equal(t, ets, s.T(), "timestamp mismatch") + require.Equal(t, ev, s.V(), "value mismatch") } it = NewBufferIterator(NewListSeriesIterator(samples{ - sample{t: 1, v: 2}, - sample{t: 2, v: 3}, - sample{t: 3, v: 4}, - sample{t: 4, v: 5}, - sample{t: 5, v: 6}, - sample{t: 99, v: 8}, - sample{t: 100, v: 9}, - sample{t: 101, v: 10}, + fSample{t: 1, f: 2}, + fSample{t: 2, f: 3}, + fSample{t: 3, f: 4}, + fSample{t: 4, f: 5}, + fSample{t: 5, f: 6}, + fSample{t: 99, f: 8}, + fSample{t: 100, f: 9}, + fSample{t: 101, f: 10}, }), 2) require.Equal(t, chunkenc.ValFloat, it.Seek(-123), "seek failed") @@ -132,24 +132,24 @@ func TestBufferedSeriesIterator(t *testing.T) { require.Equal(t, chunkenc.ValFloat, it.Next(), "next failed") sampleEq(2, 3) prevSampleEq(1, 2, true) - bufferEq([]sample{{t: 1, v: 2}}) + bufferEq([]fSample{{t: 1, f: 2}}) require.Equal(t, chunkenc.ValFloat, it.Next(), "next failed") require.Equal(t, chunkenc.ValFloat, it.Next(), "next failed") require.Equal(t, chunkenc.ValFloat, it.Next(), "next failed") sampleEq(5, 6) prevSampleEq(4, 5, true) - bufferEq([]sample{{t: 2, v: 3}, {t: 3, v: 4}, {t: 4, v: 5}}) + bufferEq([]fSample{{t: 2, f: 3}, {t: 3, f: 4}, {t: 4, f: 5}}) require.Equal(t, chunkenc.ValFloat, it.Seek(5), "seek failed") sampleEq(5, 6) prevSampleEq(4, 5, true) - bufferEq([]sample{{t: 2, v: 3}, {t: 3, v: 4}, {t: 4, v: 5}}) + bufferEq([]fSample{{t: 2, f: 3}, {t: 3, f: 4}, {t: 4, f: 5}}) require.Equal(t, chunkenc.ValFloat, it.Seek(101), "seek failed") sampleEq(101, 10) prevSampleEq(100, 9, true) - bufferEq([]sample{{t: 99, v: 8}, {t: 100, v: 9}}) + bufferEq([]fSample{{t: 99, f: 8}, {t: 100, f: 9}}) require.Equal(t, chunkenc.ValNone, it.Next(), "next succeeded unexpectedly") require.Equal(t, 
chunkenc.ValNone, it.Seek(1024), "seek succeeded unexpectedly") diff --git a/storage/memoized_iterator_test.go b/storage/memoized_iterator_test.go index 6bdd395e0..382c84e63 100644 --- a/storage/memoized_iterator_test.go +++ b/storage/memoized_iterator_test.go @@ -38,14 +38,14 @@ func TestMemoizedSeriesIterator(t *testing.T) { } it = NewMemoizedIterator(NewListSeriesIterator(samples{ - sample{t: 1, v: 2}, - sample{t: 2, v: 3}, - sample{t: 3, v: 4}, - sample{t: 4, v: 5}, - sample{t: 5, v: 6}, - sample{t: 99, v: 8}, - sample{t: 100, v: 9}, - sample{t: 101, v: 10}, + fSample{t: 1, f: 2}, + fSample{t: 2, f: 3}, + fSample{t: 3, f: 4}, + fSample{t: 4, f: 5}, + fSample{t: 5, f: 6}, + fSample{t: 99, f: 8}, + fSample{t: 100, f: 9}, + fSample{t: 101, f: 10}, }), 2) require.Equal(t, it.Seek(-123), chunkenc.ValFloat, "seek failed") diff --git a/storage/merge_test.go b/storage/merge_test.go index 6abf8723d..b0544c2d8 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -62,116 +62,116 @@ func TestMergeQuerierWithChainMerger(t *testing.T) { { name: "one querier, two series", querierSeries: [][]Series{{ - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}), + NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), + NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }}, expected: NewMockSeriesSet( - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}), + NewListSeries(labels.FromStrings("bar", "baz"), 
[]tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), + NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), ), }, { name: "two queriers, one different series each", querierSeries: [][]Series{{ - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}), + NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), }, { - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}), + NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }}, expected: NewMockSeriesSet( - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}), + NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), + NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), ), }, { name: "two time unsorted queriers, two series each", querierSeries: [][]Series{{ - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5, nil, nil}, sample{6, 6, nil, nil}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}), + NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{5, 5}, fSample{6, 6}}), + NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }, { - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, 
sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{4, 4, nil, nil}}), + NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), + NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{3, 3}, fSample{4, 4}}), }}, expected: NewMockSeriesSet( NewListSeries( labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}, sample{6, 6, nil, nil}}, + []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}, fSample{6, 6}}, ), NewListSeries( labels.FromStrings("foo", "bar"), - []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{4, 4, nil, nil}}, + []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}}, ), ), }, { name: "five queriers, only two queriers have two time unsorted series each", querierSeries: [][]Series{{}, {}, { - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5, nil, nil}, sample{6, 6, nil, nil}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}), + NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{5, 5}, fSample{6, 6}}), + NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }, { - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{4, 4, nil, nil}}), + NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), + 
NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{3, 3}, fSample{4, 4}}), }, {}}, expected: NewMockSeriesSet( NewListSeries( labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}, sample{6, 6, nil, nil}}, + []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}, fSample{6, 6}}, ), NewListSeries( labels.FromStrings("foo", "bar"), - []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{4, 4, nil, nil}}, + []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}}, ), ), }, { name: "two queriers, only two queriers have two time unsorted series each, with 3 noop and one nil querier together", querierSeries: [][]Series{{}, {}, { - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5, nil, nil}, sample{6, 6, nil, nil}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}), + NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{5, 5}, fSample{6, 6}}), + NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }, { - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{4, 4, nil, nil}}), + NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), + NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{3, 3}, fSample{4, 4}}), }, {}}, extraQueriers: []Querier{NoopQuerier(), NoopQuerier(), nil, NoopQuerier()}, expected: NewMockSeriesSet( NewListSeries( labels.FromStrings("bar", "baz"), - 
[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}, sample{6, 6, nil, nil}}, + []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}, fSample{6, 6}}, ), NewListSeries( labels.FromStrings("foo", "bar"), - []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{4, 4, nil, nil}}, + []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}}, ), ), }, { name: "two queriers, with two series, one is overlapping", querierSeries: [][]Series{{}, {}, { - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{2, 21, nil, nil}, sample{3, 31, nil, nil}, sample{5, 5, nil, nil}, sample{6, 6, nil, nil}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}), + NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{2, 21}, fSample{3, 31}, fSample{5, 5}, fSample{6, 6}}), + NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }, { - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 22, nil, nil}, sample{3, 32, nil, nil}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{4, 4, nil, nil}}), + NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 22}, fSample{3, 32}}), + NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{3, 3}, fSample{4, 4}}), }, {}}, expected: NewMockSeriesSet( NewListSeries( labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 21, nil, nil}, sample{3, 31, nil, nil}, sample{5, 5, nil, nil}, sample{6, 6, nil, nil}}, + []tsdbutil.Sample{fSample{1, 1}, fSample{2, 21}, fSample{3, 31}, fSample{5, 5}, fSample{6, 6}}, ), 
NewListSeries( labels.FromStrings("foo", "bar"), - []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{4, 4, nil, nil}}, + []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}}, ), ), }, { name: "two queries, one with NaN samples series", querierSeries: [][]Series{{ - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, math.NaN(), nil, nil}}), + NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, math.NaN()}}), }, { - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{1, 1, nil, nil}}), + NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{1, 1}}), }}, expected: NewMockSeriesSet( - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, math.NaN(), nil, nil}, sample{1, 1, nil, nil}}), + NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, math.NaN()}, fSample{1, 1}}), ), }, } { @@ -245,108 +245,108 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) { { name: "one querier, two series", chkQuerierSeries: [][]ChunkSeries{{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}, []tsdbutil.Sample{sample{2, 2, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, []tsdbutil.Sample{fSample{2, 2}}), }}, expected: NewMockChunkSeriesSet( - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, 
nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}, []tsdbutil.Sample{sample{2, 2, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, []tsdbutil.Sample{fSample{2, 2}}), ), }, { name: "two secondaries, one different series each", chkQuerierSeries: [][]ChunkSeries{{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), }, { - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}, []tsdbutil.Sample{sample{2, 2, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, []tsdbutil.Sample{fSample{2, 2}}), }}, expected: NewMockChunkSeriesSet( - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}, []tsdbutil.Sample{sample{2, 2, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, []tsdbutil.Sample{fSample{2, 2}}), 
), }, { name: "two secondaries, two not in time order series each", chkQuerierSeries: [][]ChunkSeries{{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5, nil, nil}}, []tsdbutil.Sample{sample{6, 6, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}, []tsdbutil.Sample{sample{2, 2, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{5, 5}}, []tsdbutil.Sample{fSample{6, 6}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, []tsdbutil.Sample{fSample{2, 2}}), }, { - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{3, 3, nil, nil}}, []tsdbutil.Sample{sample{4, 4, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{3, 3}}, []tsdbutil.Sample{fSample{4, 4}}), }}, expected: NewMockChunkSeriesSet( NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, - []tsdbutil.Sample{sample{3, 3, nil, nil}}, - []tsdbutil.Sample{sample{5, 5, nil, nil}}, - []tsdbutil.Sample{sample{6, 6, nil, nil}}, + []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, + []tsdbutil.Sample{fSample{3, 3}}, + []tsdbutil.Sample{fSample{5, 5}}, + []tsdbutil.Sample{fSample{6, 6}}, ), NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), - []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}, - []tsdbutil.Sample{sample{2, 2, nil, nil}}, - 
[]tsdbutil.Sample{sample{3, 3, nil, nil}}, - []tsdbutil.Sample{sample{4, 4, nil, nil}}, + []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, + []tsdbutil.Sample{fSample{2, 2}}, + []tsdbutil.Sample{fSample{3, 3}}, + []tsdbutil.Sample{fSample{4, 4}}, ), ), }, { name: "five secondaries, only two have two not in time order series each", chkQuerierSeries: [][]ChunkSeries{{}, {}, { - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5, nil, nil}}, []tsdbutil.Sample{sample{6, 6, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}, []tsdbutil.Sample{sample{2, 2, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{5, 5}}, []tsdbutil.Sample{fSample{6, 6}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, []tsdbutil.Sample{fSample{2, 2}}), }, { - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{3, 3, nil, nil}}, []tsdbutil.Sample{sample{4, 4, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{3, 3}}, []tsdbutil.Sample{fSample{4, 4}}), }, {}}, expected: NewMockChunkSeriesSet( NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, - []tsdbutil.Sample{sample{3, 3, nil, nil}}, - []tsdbutil.Sample{sample{5, 5, nil, nil}}, - []tsdbutil.Sample{sample{6, 6, nil, nil}}, + []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, + 
[]tsdbutil.Sample{fSample{3, 3}}, + []tsdbutil.Sample{fSample{5, 5}}, + []tsdbutil.Sample{fSample{6, 6}}, ), NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), - []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}, - []tsdbutil.Sample{sample{2, 2, nil, nil}}, - []tsdbutil.Sample{sample{3, 3, nil, nil}}, - []tsdbutil.Sample{sample{4, 4, nil, nil}}, + []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, + []tsdbutil.Sample{fSample{2, 2}}, + []tsdbutil.Sample{fSample{3, 3}}, + []tsdbutil.Sample{fSample{4, 4}}, ), ), }, { name: "two secondaries, with two not in time order series each, with 3 noop queries and one nil together", chkQuerierSeries: [][]ChunkSeries{{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5, nil, nil}}, []tsdbutil.Sample{sample{6, 6, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}, []tsdbutil.Sample{sample{2, 2, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{5, 5}}, []tsdbutil.Sample{fSample{6, 6}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, []tsdbutil.Sample{fSample{2, 2}}), }, { - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{3, 3, nil, nil}}, []tsdbutil.Sample{sample{4, 4, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{3, 3}}, []tsdbutil.Sample{fSample{4, 4}}), }}, extraQueriers: []ChunkQuerier{NoopChunkedQuerier(), 
NoopChunkedQuerier(), nil, NoopChunkedQuerier()}, expected: NewMockChunkSeriesSet( NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, - []tsdbutil.Sample{sample{3, 3, nil, nil}}, - []tsdbutil.Sample{sample{5, 5, nil, nil}}, - []tsdbutil.Sample{sample{6, 6, nil, nil}}, + []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, + []tsdbutil.Sample{fSample{3, 3}}, + []tsdbutil.Sample{fSample{5, 5}}, + []tsdbutil.Sample{fSample{6, 6}}, ), NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), - []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}, - []tsdbutil.Sample{sample{2, 2, nil, nil}}, - []tsdbutil.Sample{sample{3, 3, nil, nil}}, - []tsdbutil.Sample{sample{4, 4, nil, nil}}, + []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, + []tsdbutil.Sample{fSample{2, 2}}, + []tsdbutil.Sample{fSample{3, 3}}, + []tsdbutil.Sample{fSample{4, 4}}, ), ), }, { name: "two queries, one with NaN samples series", chkQuerierSeries: [][]ChunkSeries{{ - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, math.NaN(), nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, math.NaN()}}), }, { - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{1, 1, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{1, 1}}), }}, expected: NewMockChunkSeriesSet( - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, math.NaN(), nil, nil}}, []tsdbutil.Sample{sample{1, 1, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, math.NaN()}}, []tsdbutil.Sample{fSample{1, 1}}), ), }, } { @@ -385,12 +385,12 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { m := NewCompactingChunkSeriesMerger(ChainedSeriesMerge) // histogramSample returns a 
histogram that is unique to the ts. - histogramSample := func(ts int64) sample { - return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts + 1))} + histogramSample := func(ts int64) hSample { + return hSample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts + 1))} } - floatHistogramSample := func(ts int64) sample { - return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(ts + 1))} + floatHistogramSample := func(ts int64) fhSample { + return fhSample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(ts + 1))} } for _, tc := range []struct { @@ -408,9 +408,9 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { { name: "single series", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), }, - expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}), + expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), }, { name: "two empty series", @@ -423,55 +423,55 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { { name: "two non overlapping", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{7, 7, nil, nil}, sample{9, 9, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, 
fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{5, 5}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}), }, - expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}, []tsdbutil.Sample{sample{7, 7, nil, nil}, sample{9, 9, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}}), + expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{5, 5}}, []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}), }, { name: "two overlapping", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{8, 8, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{7, 7, nil, nil}, sample{9, 9, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{8, 8}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}), }, - expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{7, 7, nil, nil}, sample{8, 8, nil, nil}, sample{9, 9, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}}), + expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, 
[]tsdbutil.Sample{fSample{3, 3}, fSample{7, 7}, fSample{8, 8}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}), }, { name: "two duplicated", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), }, - expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}), + expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), }, { name: "three overlapping", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{6, 6, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{4, 4, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}, fSample{6, 6}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", 
"baz"), []tsdbutil.Sample{fSample{0, 0}, fSample{4, 4}}), }, - expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{4, 4, nil, nil}, sample{5, 5, nil, nil}, sample{6, 6, nil, nil}}), + expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5}, fSample{6, 6}}), }, { name: "three in chained overlap", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{4, 4, nil, nil}, sample{6, 66, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{6, 6, nil, nil}, sample{10, 10, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{4, 4}, fSample{6, 66}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{6, 6}, fSample{10, 10}}), }, - expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{4, 4, nil, nil}, sample{5, 5, nil, nil}, sample{6, 66, nil, nil}, sample{10, 10, nil, nil}}), + expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5}, fSample{6, 66}, fSample{10, 10}}), }, { name: "three in chained overlap complex", input: []ChunkSeries{ - 
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{5, 5, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}, sample{15, 15, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{20, 20, nil, nil}}, []tsdbutil.Sample{sample{25, 25, nil, nil}, sample{30, 30, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{18, 18, nil, nil}, sample{26, 26, nil, nil}}, []tsdbutil.Sample{sample{31, 31, nil, nil}, sample{35, 35, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{0, 0}, fSample{5, 5}}, []tsdbutil.Sample{fSample{10, 10}, fSample{15, 15}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{2, 2}, fSample{20, 20}}, []tsdbutil.Sample{fSample{25, 25}, fSample{30, 30}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{18, 18}, fSample{26, 26}}, []tsdbutil.Sample{fSample{31, 31}, fSample{35, 35}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{2, 2, nil, nil}, sample{5, 5, nil, nil}, sample{10, 10, nil, nil}, sample{15, 15, nil, nil}, sample{18, 18, nil, nil}, sample{20, 20, nil, nil}, sample{25, 25, nil, nil}, sample{26, 26, nil, nil}, sample{30, 30, nil, nil}}, - []tsdbutil.Sample{sample{31, 31, nil, nil}, sample{35, 35, nil, nil}}, + []tsdbutil.Sample{fSample{0, 0}, fSample{2, 2}, fSample{5, 5}, fSample{10, 10}, fSample{15, 15}, fSample{18, 18}, fSample{20, 20}, fSample{25, 25}, fSample{26, 26}, fSample{30, 30}}, + []tsdbutil.Sample{fSample{31, 31}, fSample{35, 35}}, ), }, { @@ -511,13 +511,13 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { name: "histogram chunks overlapping with float chunks", input: []ChunkSeries{ 
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{histogramSample(0), histogramSample(5)}, []tsdbutil.Sample{histogramSample(10), histogramSample(15)}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{12, 12, nil, nil}}, []tsdbutil.Sample{sample{14, 14, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{12, 12}}, []tsdbutil.Sample{fSample{14, 14}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{histogramSample(0)}, - []tsdbutil.Sample{sample{1, 1, nil, nil}}, + []tsdbutil.Sample{fSample{1, 1}}, []tsdbutil.Sample{histogramSample(5), histogramSample(10)}, - []tsdbutil.Sample{sample{12, 12, nil, nil}, sample{14, 14, nil, nil}}, + []tsdbutil.Sample{fSample{12, 12}, fSample{14, 14}}, []tsdbutil.Sample{histogramSample(15)}, ), }, @@ -537,13 +537,13 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { name: "float histogram chunks overlapping with float chunks", input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{floatHistogramSample(0), floatHistogramSample(5)}, []tsdbutil.Sample{floatHistogramSample(10), floatHistogramSample(15)}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{12, 12, nil, nil}}, []tsdbutil.Sample{sample{14, 14, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{12, 12}}, []tsdbutil.Sample{fSample{14, 14}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{floatHistogramSample(0)}, - []tsdbutil.Sample{sample{1, 1, nil, nil}}, + []tsdbutil.Sample{fSample{1, 1}}, []tsdbutil.Sample{floatHistogramSample(5), floatHistogramSample(10)}, - []tsdbutil.Sample{sample{12, 12, nil, nil}, sample{14, 14, nil, nil}}, + 
[]tsdbutil.Sample{fSample{12, 12}, fSample{14, 14}}, []tsdbutil.Sample{floatHistogramSample(15)}, ), }, @@ -592,9 +592,9 @@ func TestConcatenatingChunkSeriesMerger(t *testing.T) { { name: "single series", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), }, - expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}), + expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), }, { name: "two empty series", @@ -607,70 +607,70 @@ func TestConcatenatingChunkSeriesMerger(t *testing.T) { { name: "two non overlapping", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{7, 7, nil, nil}, sample{9, 9, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{5, 5}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}), }, - expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}, 
[]tsdbutil.Sample{sample{7, 7, nil, nil}, sample{9, 9, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}}), + expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{5, 5}}, []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}), }, { name: "two overlapping", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{8, 8, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{7, 7, nil, nil}, sample{9, 9, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{8, 8}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{8, 8, nil, nil}}, - []tsdbutil.Sample{sample{7, 7, nil, nil}, sample{9, 9, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}}, + []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{8, 8}}, + []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}, ), }, { name: "two duplicated", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, 
nil}, sample{5, 5, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}, - []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}, + []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}, + []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}, ), }, { name: "three overlapping", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{6, 6, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{4, 4, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}, fSample{6, 6}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{0, 0}, fSample{4, 4}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}, - []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{6, 6, nil, nil}}, - []tsdbutil.Sample{sample{0, 0, nil, nil}, 
sample{4, 4, nil, nil}}, + []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}, + []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}, fSample{6, 6}}, + []tsdbutil.Sample{fSample{0, 0}, fSample{4, 4}}, ), }, { name: "three in chained overlap", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{4, 4, nil, nil}, sample{6, 66, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{6, 6, nil, nil}, sample{10, 10, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{4, 4}, fSample{6, 66}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{6, 6}, fSample{10, 10}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}, - []tsdbutil.Sample{sample{4, 4, nil, nil}, sample{6, 66, nil, nil}}, - []tsdbutil.Sample{sample{6, 6, nil, nil}, sample{10, 10, nil, nil}}, + []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}, + []tsdbutil.Sample{fSample{4, 4}, fSample{6, 66}}, + []tsdbutil.Sample{fSample{6, 6}, fSample{10, 10}}, ), }, { name: "three in chained overlap complex", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{5, 5, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}, sample{15, 15, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), 
[]tsdbutil.Sample{sample{2, 2, nil, nil}, sample{20, 20, nil, nil}}, []tsdbutil.Sample{sample{25, 25, nil, nil}, sample{30, 30, nil, nil}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{18, 18, nil, nil}, sample{26, 26, nil, nil}}, []tsdbutil.Sample{sample{31, 31, nil, nil}, sample{35, 35, nil, nil}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{0, 0}, fSample{5, 5}}, []tsdbutil.Sample{fSample{10, 10}, fSample{15, 15}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{2, 2}, fSample{20, 20}}, []tsdbutil.Sample{fSample{25, 25}, fSample{30, 30}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{18, 18}, fSample{26, 26}}, []tsdbutil.Sample{fSample{31, 31}, fSample{35, 35}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{5, 5, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}, sample{15, 15, nil, nil}}, - []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{20, 20, nil, nil}}, []tsdbutil.Sample{sample{25, 25, nil, nil}, sample{30, 30, nil, nil}}, - []tsdbutil.Sample{sample{18, 18, nil, nil}, sample{26, 26, nil, nil}}, []tsdbutil.Sample{sample{31, 31, nil, nil}, sample{35, 35, nil, nil}}, + []tsdbutil.Sample{fSample{0, 0}, fSample{5, 5}}, []tsdbutil.Sample{fSample{10, 10}, fSample{15, 15}}, + []tsdbutil.Sample{fSample{2, 2}, fSample{20, 20}}, []tsdbutil.Sample{fSample{25, 25}, fSample{30, 30}}, + []tsdbutil.Sample{fSample{18, 18}, fSample{26, 26}}, []tsdbutil.Sample{fSample{31, 31}, fSample{35, 35}}, ), }, { @@ -807,38 +807,38 @@ func TestChainSampleIterator(t *testing.T) { }{ { input: []chunkenc.Iterator{ - NewListSeriesIterator(samples{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}), + NewListSeriesIterator(samples{fSample{0, 0}, fSample{1, 1}}), }, - expected: []tsdbutil.Sample{sample{0, 0, nil, nil}, 
sample{1, 1, nil, nil}}, + expected: []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, }, { input: []chunkenc.Iterator{ - NewListSeriesIterator(samples{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}), - NewListSeriesIterator(samples{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}), + NewListSeriesIterator(samples{fSample{0, 0}, fSample{1, 1}}), + NewListSeriesIterator(samples{fSample{2, 2}, fSample{3, 3}}), }, - expected: []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, + expected: []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}, }, { input: []chunkenc.Iterator{ - NewListSeriesIterator(samples{sample{0, 0, nil, nil}, sample{3, 3, nil, nil}}), - NewListSeriesIterator(samples{sample{1, 1, nil, nil}, sample{4, 4, nil, nil}}), - NewListSeriesIterator(samples{sample{2, 2, nil, nil}, sample{5, 5, nil, nil}}), + NewListSeriesIterator(samples{fSample{0, 0}, fSample{3, 3}}), + NewListSeriesIterator(samples{fSample{1, 1}, fSample{4, 4}}), + NewListSeriesIterator(samples{fSample{2, 2}, fSample{5, 5}}), }, expected: []tsdbutil.Sample{ - sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{4, 4, nil, nil}, sample{5, 5, nil, nil}, + fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5}, }, }, // Overlap. 
{ input: []chunkenc.Iterator{ - NewListSeriesIterator(samples{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}), - NewListSeriesIterator(samples{sample{0, 0, nil, nil}, sample{2, 2, nil, nil}}), - NewListSeriesIterator(samples{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}), + NewListSeriesIterator(samples{fSample{0, 0}, fSample{1, 1}}), + NewListSeriesIterator(samples{fSample{0, 0}, fSample{2, 2}}), + NewListSeriesIterator(samples{fSample{2, 2}, fSample{3, 3}}), NewListSeriesIterator(samples{}), NewListSeriesIterator(samples{}), NewListSeriesIterator(samples{}), }, - expected: []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, + expected: []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}, }, } { merged := ChainSampleIteratorFromIterators(nil, tc.input) @@ -856,42 +856,42 @@ func TestChainSampleIteratorSeek(t *testing.T) { }{ { input: []chunkenc.Iterator{ - NewListSeriesIterator(samples{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}), + NewListSeriesIterator(samples{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }, seek: 1, - expected: []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, + expected: []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, }, { input: []chunkenc.Iterator{ - NewListSeriesIterator(samples{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}), - NewListSeriesIterator(samples{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}), + NewListSeriesIterator(samples{fSample{0, 0}, fSample{1, 1}}), + NewListSeriesIterator(samples{fSample{2, 2}, fSample{3, 3}}), }, seek: 2, - expected: []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, + expected: []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}}, }, { input: []chunkenc.Iterator{ - NewListSeriesIterator(samples{sample{0, 0, nil, nil}, sample{3, 3, nil, nil}}), - NewListSeriesIterator(samples{sample{1, 1, nil, nil}, sample{4, 4, nil, nil}}), - 
NewListSeriesIterator(samples{sample{2, 2, nil, nil}, sample{5, 5, nil, nil}}), + NewListSeriesIterator(samples{fSample{0, 0}, fSample{3, 3}}), + NewListSeriesIterator(samples{fSample{1, 1}, fSample{4, 4}}), + NewListSeriesIterator(samples{fSample{2, 2}, fSample{5, 5}}), }, seek: 2, - expected: []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{4, 4, nil, nil}, sample{5, 5, nil, nil}}, + expected: []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5}}, }, { input: []chunkenc.Iterator{ - NewListSeriesIterator(samples{sample{0, 0, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}), - NewListSeriesIterator(samples{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}), + NewListSeriesIterator(samples{fSample{0, 0}, fSample{2, 2}, fSample{3, 3}}), + NewListSeriesIterator(samples{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }, seek: 0, - expected: []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, + expected: []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}, }, } { merged := ChainSampleIteratorFromIterators(nil, tc.input) actual := []tsdbutil.Sample{} if merged.Seek(tc.seek) == chunkenc.ValFloat { - t, v := merged.At() - actual = append(actual, sample{t, v, nil, nil}) + t, f := merged.At() + actual = append(actual, fSample{t, f}) } s, err := ExpandSamples(merged, nil) require.NoError(t, err) @@ -906,7 +906,7 @@ func makeSeries(numSeries, numSamples int) []Series { labels := labels.FromStrings("foo", fmt.Sprintf("bar%d", j)) samples := []tsdbutil.Sample{} for k := 0; k < numSamples; k++ { - samples = append(samples, sample{t: int64(k), v: float64(k)}) + samples = append(samples, fSample{t: int64(k), f: float64(k)}) } series = append(series, NewListSeries(labels, samples)) } diff --git a/storage/series.go b/storage/series.go index dcb6dd82e..3f90f7702 100644 --- a/storage/series.go +++ b/storage/series.go @@ 
-376,10 +376,17 @@ func (e errChunksIterator) Err() error { return e.err } // ExpandSamples iterates over all samples in the iterator, buffering all in slice. // Optionally it takes samples constructor, useful when you want to compare sample slices with different // sample implementations. if nil, sample type from this package will be used. -func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample) ([]tsdbutil.Sample, error) { +func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample) ([]tsdbutil.Sample, error) { if newSampleFn == nil { - newSampleFn = func(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample { - return sample{t, v, h, fh} + newSampleFn = func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample { + switch { + case h != nil: + return hSample{t, h} + case fh != nil: + return fhSample{t, fh} + default: + return fSample{t, f} + } } } @@ -389,12 +396,12 @@ func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, v float64, case chunkenc.ValNone: return result, iter.Err() case chunkenc.ValFloat: - t, v := iter.At() + t, f := iter.At() // NaNs can't be compared normally, so substitute for another value. 
- if math.IsNaN(v) { - v = -42 + if math.IsNaN(f) { + f = -42 } - result = append(result, newSampleFn(t, v, nil, nil)) + result = append(result, newSampleFn(t, f, nil, nil)) case chunkenc.ValHistogram: t, h := iter.AtHistogram() result = append(result, newSampleFn(t, 0, h, nil)) diff --git a/storage/series_test.go b/storage/series_test.go index 1f68d23b9..210a68e28 100644 --- a/storage/series_test.go +++ b/storage/series_test.go @@ -25,11 +25,11 @@ import ( func TestListSeriesIterator(t *testing.T) { it := NewListSeriesIterator(samples{ - sample{0, 0, nil, nil}, - sample{1, 1, nil, nil}, - sample{1, 1.5, nil, nil}, - sample{2, 2, nil, nil}, - sample{3, 3, nil, nil}, + fSample{0, 0}, + fSample{1, 1}, + fSample{1, 1.5}, + fSample{2, 2}, + fSample{3, 3}, }) // Seek to the first sample with ts=1. @@ -78,20 +78,20 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) { { lbs: labels.FromStrings("__name__", "up", "instance", "localhost:8080"), samples: []tsdbutil.Sample{ - sample{t: 1, v: 1}, - sample{t: 2, v: 2}, - sample{t: 3, v: 3}, - sample{t: 4, v: 4}, + fSample{t: 1, f: 1}, + fSample{t: 2, f: 2}, + fSample{t: 3, f: 3}, + fSample{t: 4, f: 4}, }, }, { lbs: labels.FromStrings("__name__", "up", "instance", "localhost:8081"), samples: []tsdbutil.Sample{ - sample{t: 1, v: 2}, - sample{t: 2, v: 3}, - sample{t: 3, v: 4}, - sample{t: 4, v: 5}, - sample{t: 5, v: 6}, - sample{t: 6, v: 7}, + fSample{t: 1, f: 2}, + fSample{t: 2, f: 3}, + fSample{t: 3, f: 4}, + fSample{t: 4, f: 5}, + fSample{t: 5, f: 6}, + fSample{t: 6, f: 7}, }, }, } @@ -114,7 +114,7 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) { j := 0 for iter.Next() == chunkenc.ValFloat { ts, v := iter.At() - require.EqualValues(t, series[i].samples[j], sample{t: ts, v: v}) + require.EqualValues(t, series[i].samples[j], fSample{t: ts, f: v}) j++ } } diff --git a/tsdb/tsdbutil/chunks.go b/tsdb/tsdbutil/chunks.go index f9981ffe1..4e025c5e6 100644 --- a/tsdb/tsdbutil/chunks.go +++ b/tsdb/tsdbutil/chunks.go @@ -28,7 +28,7 
@@ type Samples interface { type Sample interface { T() int64 - V() float64 + V() float64 // TODO(beorn7): Rename to F(). H() *histogram.Histogram FH() *histogram.FloatHistogram Type() chunkenc.ValueType diff --git a/web/federate.go b/web/federate.go index ad2574e80..15e31b1a3 100644 --- a/web/federate.go +++ b/web/federate.go @@ -115,38 +115,44 @@ Loop: var ( t int64 - v float64 - h *histogram.Histogram + f float64 fh *histogram.FloatHistogram - ok bool ) valueType := it.Seek(maxt) switch valueType { case chunkenc.ValFloat: - t, v = it.At() + t, f = it.At() case chunkenc.ValFloatHistogram, chunkenc.ValHistogram: t, fh = it.AtFloatHistogram() default: - t, v, h, fh, ok = it.PeekBack(1) + sample, ok := it.PeekBack(1) if !ok { continue Loop } - if h != nil { - fh = h.ToFloat() + t = sample.T() + switch sample.Type() { + case chunkenc.ValFloat: + f = sample.V() + case chunkenc.ValHistogram: + fh = sample.H().ToFloat() + case chunkenc.ValFloatHistogram: + fh = sample.FH() + default: + continue Loop } } // The exposition formats do not support stale markers, so drop them. This // is good enough for staleness handling of federated data, as the // interval-based limits on staleness will do the right thing for supported // use cases (which is to say federating aggregated time series). - if value.IsStaleNaN(v) { + if value.IsStaleNaN(f) || (fh != nil && value.IsStaleNaN(fh.Sum)) { continue } vec = append(vec, promql.Sample{ Metric: s.Labels(), T: t, - F: v, + F: f, H: fh, }) } From 462240bc787793dfe62c05c335f98b84401e4da0 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Fri, 9 Dec 2022 00:44:48 +0100 Subject: [PATCH 070/632] storage: add specialized buffers to sampleRing This utilizes the fact that most sampleRings will only contain samples of one type. In this case, the generic interface is circumvented, and a bespoke buffer for the one actually occurring sample type is used. 
Should a sampleRing receive a sample of a different kind later, it will transparently switch to the generic behavior. Signed-off-by: beorn7 --- storage/buffer.go | 288 +++++++++++++++++++++++++++++++++++------ storage/buffer_test.go | 2 +- 2 files changed, 252 insertions(+), 38 deletions(-) diff --git a/storage/buffer.go b/storage/buffer.go index 68210facc..9a9aff2af 100644 --- a/storage/buffer.go +++ b/storage/buffer.go @@ -44,7 +44,7 @@ func NewBuffer(delta int64) *BufferedSeriesIterator { func NewBufferIterator(it chunkenc.Iterator, delta int64) *BufferedSeriesIterator { // TODO(codesome): based on encoding, allocate different buffer. bit := &BufferedSeriesIterator{ - buf: newSampleRing(delta, 16), + buf: newSampleRing(delta, 0, chunkenc.ValNone), delta: delta, } bit.Reset(it) @@ -121,13 +121,13 @@ func (b *BufferedSeriesIterator) Next() chunkenc.ValueType { return chunkenc.ValNone case chunkenc.ValFloat: t, f := b.it.At() - b.buf.add(fSample{t: t, f: f}) + b.buf.addF(fSample{t: t, f: f}) case chunkenc.ValHistogram: t, h := b.it.AtHistogram() - b.buf.add(hSample{t: t, h: h}) + b.buf.addH(hSample{t: t, h: h}) case chunkenc.ValFloatHistogram: t, fh := b.it.AtFloatHistogram() - b.buf.add(fhSample{t: t, fh: fh}) + b.buf.addFH(fhSample{t: t, fh: fh}) default: panic(fmt.Errorf("BufferedSeriesIterator: unknown value type %v", b.valueType)) } @@ -242,18 +242,44 @@ func (s fhSample) Type() chunkenc.ValueType { type sampleRing struct { delta int64 - buf []tsdbutil.Sample // lookback buffer - i int // position of most recent element in ring buffer - f int // position of first element in ring buffer - l int // number of elements in buffer + // Lookback buffers. We use buf for mixed samples, but one of the three + // concrete ones for homogenous samples. (Only one of the four bufs is + // allowed to be populated!) This avoids the overhead of the interface + // wrapper for the happy (and by far most common) case of homogenous + // samples. 
+ buf []tsdbutil.Sample + fBuf []fSample + hBuf []hSample + fhBuf []fhSample + + i int // Position of most recent element in ring buffer. + f int // Position of first element in ring buffer. + l int // Number of elements in buffer. it sampleRingIterator } -func newSampleRing(delta int64, sz int) *sampleRing { - r := &sampleRing{delta: delta, buf: make([]tsdbutil.Sample, sz)} +// newSampleRing creates a new sampleRing. If you do not know the prefereed +// value type yet, use a size of 0 (in which case the provided typ doesn't +// matter). On the first add, a buffer of size 16 will be allocated with the +// preferred type being the type of the first added sample. +func newSampleRing(delta int64, size int, typ chunkenc.ValueType) *sampleRing { + r := &sampleRing{delta: delta} r.reset() - + if size <= 0 { + // Will initialize on first add. + return r + } + switch typ { + case chunkenc.ValFloat: + r.fBuf = make([]fSample, size) + case chunkenc.ValHistogram: + r.hBuf = make([]hSample, size) + case chunkenc.ValFloatHistogram: + r.fhBuf = make([]fhSample, size) + default: + r.buf = make([]tsdbutil.Sample, size) + } return r } @@ -274,7 +300,7 @@ type sampleRingIterator struct { r *sampleRing i int t int64 - v float64 + f float64 h *histogram.Histogram fh *histogram.FloatHistogram } @@ -284,6 +310,23 @@ func (it *sampleRingIterator) Next() chunkenc.ValueType { if it.i >= it.r.l { return chunkenc.ValNone } + switch { + case len(it.r.fBuf) > 0: + s := it.r.atF(it.i) + it.t = s.t + it.f = s.f + return chunkenc.ValFloat + case len(it.r.hBuf) > 0: + s := it.r.atH(it.i) + it.t = s.t + it.h = s.h + return chunkenc.ValHistogram + case len(it.r.fhBuf) > 0: + s := it.r.atFH(it.i) + it.t = s.t + it.fh = s.fh + return chunkenc.ValFloatHistogram + } s := it.r.at(it.i) it.t = s.T() switch s.Type() { @@ -294,7 +337,7 @@ func (it *sampleRingIterator) Next() chunkenc.ValueType { it.fh = s.FH() return chunkenc.ValFloatHistogram default: - it.v = s.V() + it.f = s.V() return chunkenc.ValFloat 
} } @@ -308,7 +351,7 @@ func (it *sampleRingIterator) Err() error { } func (it *sampleRingIterator) At() (int64, float64) { - return it.t, it.v + return it.t, it.f } func (it *sampleRingIterator) AtHistogram() (int64, *histogram.Histogram) { @@ -331,17 +374,128 @@ func (r *sampleRing) at(i int) tsdbutil.Sample { return r.buf[j] } -// add adds a sample to the ring buffer and frees all samples that fall -// out of the delta range. -func (r *sampleRing) add(s tsdbutil.Sample) { - l := len(r.buf) - // Grow the ring buffer if it fits no more elements. - if l == r.l { - buf := make([]tsdbutil.Sample, 2*l) - copy(buf[l+r.f:], r.buf[r.f:]) - copy(buf, r.buf[:r.f]) +func (r *sampleRing) atF(i int) fSample { + j := (r.f + i) % len(r.fBuf) + return r.fBuf[j] +} - r.buf = buf +func (r *sampleRing) atH(i int) hSample { + j := (r.f + i) % len(r.hBuf) + return r.hBuf[j] +} + +func (r *sampleRing) atFH(i int) fhSample { + j := (r.f + i) % len(r.fhBuf) + return r.fhBuf[j] +} + +// add adds a sample to the ring buffer and frees all samples that fall out of +// the delta range. Note that this method works for any sample +// implementation. If you know you are dealing with one of the implementations +// from this package (fSample, hSample, fhSample), call one of the specialized +// methods addF, addH, or addFH for better performance. +func (r *sampleRing) add(s tsdbutil.Sample) { + if len(r.buf) == 0 { + // Nothing added to the interface buf yet. Let's check if we can + // stay specialized. + switch s := s.(type) { + case fSample: + if len(r.hBuf)+len(r.fhBuf) == 0 { + r.fBuf = genericAdd(s, r.fBuf, r) + return + } + case hSample: + if len(r.fBuf)+len(r.fhBuf) == 0 { + r.hBuf = genericAdd(s, r.hBuf, r) + return + } + case fhSample: + if len(r.fBuf)+len(r.hBuf) == 0 { + r.fhBuf = genericAdd(s, r.fhBuf, r) + return + } + } + // The new sample isn't a fit for the already existing + // ones. Copy the latter into the interface buffer where needed. 
+ switch { + case len(r.fBuf) > 0: + for _, s := range r.fBuf { + r.buf = append(r.buf, s) + } + r.fBuf = nil + case len(r.hBuf) > 0: + for _, s := range r.hBuf { + r.buf = append(r.buf, s) + } + r.hBuf = nil + case len(r.fhBuf) > 0: + for _, s := range r.fhBuf { + r.buf = append(r.buf, s) + } + r.fhBuf = nil + } + } + r.buf = genericAdd(s, r.buf, r) +} + +// addF is a version of the add method specialized for fSample. +func (r *sampleRing) addF(s fSample) { + switch { + case len(r.buf) > 0: + // Already have interface samples. Add to the interface buf. + r.buf = genericAdd[tsdbutil.Sample](s, r.buf, r) + case len(r.hBuf)+len(r.fhBuf) > 0: + // Already have specialized samples that are not fSamples. + // Need to call the checked add method for conversion. + r.add(s) + default: + r.fBuf = genericAdd(s, r.fBuf, r) + } +} + +// addH is a version of the add method specialized for hSample. +func (r *sampleRing) addH(s hSample) { + switch { + case len(r.buf) > 0: + // Already have interface samples. Add to the interface buf. + r.buf = genericAdd[tsdbutil.Sample](s, r.buf, r) + case len(r.fBuf)+len(r.fhBuf) > 0: + // Already have samples that are not hSamples. + // Need to call the checked add method for conversion. + r.add(s) + default: + r.hBuf = genericAdd(s, r.hBuf, r) + } +} + +// addFH is a version of the add method specialized for fhSample. +func (r *sampleRing) addFH(s fhSample) { + switch { + case len(r.buf) > 0: + // Already have interface samples. Add to the interface buf. + r.buf = genericAdd[tsdbutil.Sample](s, r.buf, r) + case len(r.fBuf)+len(r.hBuf) > 0: + // Already have samples that are not fhSamples. + // Need to call the checked add method for conversion. + r.add(s) + default: + r.fhBuf = genericAdd(s, r.fhBuf, r) + } +} + +func genericAdd[T tsdbutil.Sample](s T, buf []T, r *sampleRing) []T { + l := len(buf) + // Grow the ring buffer if it fits no more elements. 
+ if l == 0 { + buf = make([]T, 16) + l = 16 + } + if l == r.l { + newBuf := make([]T, 2*l) + copy(newBuf[l+r.f:], buf[r.f:]) + copy(newBuf, buf[:r.f]) + + buf = newBuf r.i = r.f r.f += l l = 2 * l @@ -352,18 +506,19 @@ func (r *sampleRing) add(s tsdbutil.Sample) { } } - r.buf[r.i] = s + buf[r.i] = s r.l++ // Free head of the buffer of samples that just fell out of the range. tmin := s.T() - r.delta - for r.buf[r.f].T() < tmin { + for buf[r.f].T() < tmin { r.f++ if r.f >= l { r.f -= l } r.l-- } + return buf } // reduceDelta lowers the buffered time delta, dropping any samples that are @@ -378,17 +533,30 @@ func (r *sampleRing) reduceDelta(delta int64) bool { return true } + switch { + case len(r.fBuf) > 0: + genericReduceDelta(r.fBuf, r) + case len(r.hBuf) > 0: + genericReduceDelta(r.hBuf, r) + case len(r.fhBuf) > 0: + genericReduceDelta(r.fhBuf, r) + default: + genericReduceDelta(r.buf, r) + } + return true +} + +func genericReduceDelta[T tsdbutil.Sample](buf []T, r *sampleRing) { // Free head of the buffer of samples that just fell out of the range. - l := len(r.buf) - tmin := r.buf[r.i].T() - delta - for r.buf[r.f].T() < tmin { + l := len(buf) + tmin := buf[r.i].T() - r.delta + for buf[r.f].T() < tmin { r.f++ if r.f >= l { r.f -= l } r.l-- } - return true } // nthLast returns the nth most recent element added to the ring. 
@@ -396,7 +564,17 @@ func (r *sampleRing) nthLast(n int) (tsdbutil.Sample, bool) { if n > r.l { return fSample{}, false } - return r.at(r.l - n), true + i := r.l - n + switch { + case len(r.fBuf) > 0: + return r.atF(i), true + case len(r.hBuf) > 0: + return r.atH(i), true + case len(r.fhBuf) > 0: + return r.atFH(i), true + default: + return r.at(i), true + } } func (r *sampleRing) samples() []tsdbutil.Sample { @@ -404,13 +582,49 @@ func (r *sampleRing) samples() []tsdbutil.Sample { k := r.f + r.l var j int - if k > len(r.buf) { - k = len(r.buf) - j = r.l - k + r.f - } - n := copy(res, r.buf[r.f:k]) - copy(res[n:], r.buf[:j]) + switch { + case len(r.buf) > 0: + if k > len(r.buf) { + k = len(r.buf) + j = r.l - k + r.f + } + n := copy(res, r.buf[r.f:k]) + copy(res[n:], r.buf[:j]) + case len(r.fBuf) > 0: + if k > len(r.fBuf) { + k = len(r.fBuf) + j = r.l - k + r.f + } + resF := make([]fSample, r.l) + n := copy(resF, r.fBuf[r.f:k]) + copy(resF[n:], r.fBuf[:j]) + for i, s := range resF { + res[i] = s + } + case len(r.hBuf) > 0: + if k > len(r.hBuf) { + k = len(r.hBuf) + j = r.l - k + r.f + } + resH := make([]hSample, r.l) + n := copy(resH, r.hBuf[r.f:k]) + copy(resH[n:], r.hBuf[:j]) + for i, s := range resH { + res[i] = s + } + case len(r.fhBuf) > 0: + if k > len(r.fhBuf) { + k = len(r.fhBuf) + j = r.l - k + r.f + } + resFH := make([]fhSample, r.l) + n := copy(resFH, r.fhBuf[r.f:k]) + copy(resFH[n:], r.fhBuf[:j]) + for i, s := range resFH { + res[i] = s + } + } return res } diff --git a/storage/buffer_test.go b/storage/buffer_test.go index 7cfda4d4c..2fd9e81d0 100644 --- a/storage/buffer_test.go +++ b/storage/buffer_test.go @@ -56,7 +56,7 @@ func TestSampleRing(t *testing.T) { }, } for _, c := range cases { - r := newSampleRing(c.delta, c.size) + r := newSampleRing(c.delta, c.size, chunkenc.ValFloat) input := []fSample{} for _, t := range c.input { From 817a2396cb2b9d15c6e10d88fa3b6fffdc914d3e Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 30 Mar 2023 19:50:13 +0200 
Subject: [PATCH 071/632] Name float values as "floats", not as "values" In the past, every sample value was a float, so it was fine to call a variable holding such a float "value" or "sample". With native histograms, a sample might have a histogram value. And a histogram value is still a value. Calling a float value just "value" or "sample" or "V" is therefore misleading. Over the last few commits, I already renamed many variables, but this cleans up a few more places where the changes are more invasive. Note that we do not to attempt naming in the JSON APIs or in the protobufs. That would be quite a disruption. However, internally, we can call variables as we want, and we should go with the option of avoiding misunderstandings. Signed-off-by: beorn7 --- promql/functions.go | 20 +- storage/buffer.go | 8 +- storage/buffer_test.go | 4 +- storage/series.go | 2 +- tsdb/agent/db_test.go | 6 +- tsdb/block_test.go | 16 +- tsdb/blockwriter_test.go | 4 +- tsdb/compact_test.go | 4 +- tsdb/db_test.go | 66 +++--- tsdb/head.go | 4 +- tsdb/head_test.go | 42 ++-- tsdb/ooo_head.go | 4 +- tsdb/ooo_head_read_test.go | 402 ++++++++++++++++++------------------- tsdb/ooo_head_test.go | 6 +- tsdb/querier_test.go | 16 +- tsdb/tsdbutil/chunks.go | 14 +- util/jsonutil/marshal.go | 6 +- web/federate.go | 2 +- 18 files changed, 313 insertions(+), 313 deletions(-) diff --git a/promql/functions.go b/promql/functions.go index 8fc5e64a5..fd99703df 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -70,7 +70,7 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod samples = vals[0].(Matrix)[0] rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset) rangeEnd = enh.Ts - durationMilliseconds(vs.Offset) - resultValue float64 + resultFloat float64 resultHistogram *histogram.FloatHistogram firstT, lastT int64 numSamplesMinusOne int @@ -99,7 +99,7 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod numSamplesMinusOne = 
len(samples.Floats) - 1 firstT = samples.Floats[0].T lastT = samples.Floats[numSamplesMinusOne].T - resultValue = samples.Floats[numSamplesMinusOne].F - samples.Floats[0].F + resultFloat = samples.Floats[numSamplesMinusOne].F - samples.Floats[0].F if !isCounter { break } @@ -107,7 +107,7 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod prevValue := samples.Floats[0].F for _, currPoint := range samples.Floats[1:] { if currPoint.F < prevValue { - resultValue += prevValue + resultFloat += prevValue } prevValue = currPoint.F } @@ -124,14 +124,14 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod averageDurationBetweenSamples := sampledInterval / float64(numSamplesMinusOne) // TODO(beorn7): Do this for histograms, too. - if isCounter && resultValue > 0 && len(samples.Floats) > 0 && samples.Floats[0].F >= 0 { + if isCounter && resultFloat > 0 && len(samples.Floats) > 0 && samples.Floats[0].F >= 0 { // Counters cannot be negative. If we have any slope at all - // (i.e. resultValue went up), we can extrapolate the zero point + // (i.e. resultFloat went up), we can extrapolate the zero point // of the counter. If the duration to the zero point is shorter // than the durationToStart, we take the zero point as the start // of the series, thereby avoiding extrapolation to negative // counter values. 
- durationToZero := sampledInterval * (samples.Floats[0].F / resultValue) + durationToZero := sampledInterval * (samples.Floats[0].F / resultFloat) if durationToZero < durationToStart { durationToStart = durationToZero } @@ -159,12 +159,12 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod factor /= ms.Range.Seconds() } if resultHistogram == nil { - resultValue *= factor + resultFloat *= factor } else { resultHistogram.Scale(factor) } - return append(enh.Out, Sample{F: resultValue, H: resultHistogram}) + return append(enh.Out, Sample{F: resultFloat, H: resultHistogram}) } // histogramRate is a helper function for extrapolatedRate. It requires @@ -418,10 +418,10 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper toNearestInverse := 1.0 / toNearest for _, el := range vec { - v := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse + f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - F: v, + F: f, }) } return enh.Out diff --git a/storage/buffer.go b/storage/buffer.go index 9a9aff2af..0daadc044 100644 --- a/storage/buffer.go +++ b/storage/buffer.go @@ -173,7 +173,7 @@ func (s fSample) T() int64 { return s.t } -func (s fSample) V() float64 { +func (s fSample) F() float64 { return s.f } @@ -198,7 +198,7 @@ func (s hSample) T() int64 { return s.t } -func (s hSample) V() float64 { +func (s hSample) F() float64 { panic("F() called for hSample") } @@ -223,7 +223,7 @@ func (s fhSample) T() int64 { return s.t } -func (s fhSample) V() float64 { +func (s fhSample) F() float64 { panic("F() called for fhSample") } @@ -337,7 +337,7 @@ func (it *sampleRingIterator) Next() chunkenc.ValueType { it.fh = s.FH() return chunkenc.ValFloatHistogram default: - it.f = s.V() + it.f = s.F() return chunkenc.ValFloat } } diff --git a/storage/buffer_test.go b/storage/buffer_test.go index 2fd9e81d0..ebe24d8df 100644 --- a/storage/buffer_test.go 
+++ b/storage/buffer_test.go @@ -73,7 +73,7 @@ func TestSampleRing(t *testing.T) { for _, sold := range input[:i] { found := false for _, bs := range buffered { - if bs.T() == sold.t && bs.V() == sold.f { + if bs.T() == sold.t && bs.F() == sold.f { found = true break } @@ -110,7 +110,7 @@ func TestBufferedSeriesIterator(t *testing.T) { s, ok := it.PeekBack(1) require.Equal(t, eok, ok, "exist mismatch") require.Equal(t, ets, s.T(), "timestamp mismatch") - require.Equal(t, ev, s.V(), "value mismatch") + require.Equal(t, ev, s.F(), "value mismatch") } it = NewBufferIterator(NewListSeriesIterator(samples{ diff --git a/storage/series.go b/storage/series.go index 3f90f7702..f609df3f0 100644 --- a/storage/series.go +++ b/storage/series.go @@ -109,7 +109,7 @@ func (it *listSeriesIterator) Reset(samples Samples) { func (it *listSeriesIterator) At() (int64, float64) { s := it.samples.Get(it.idx) - return s.T(), s.V() + return s.T(), s.F() } func (it *listSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index 9b27aaa0b..f654fdb90 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -133,13 +133,13 @@ func TestCommit(t *testing.T) { for i := 0; i < numDatapoints; i++ { sample := tsdbutil.GenerateSamples(0, 1) - ref, err := app.Append(0, lset, sample[0].T(), sample[0].V()) + ref, err := app.Append(0, lset, sample[0].T(), sample[0].F()) require.NoError(t, err) e := exemplar.Exemplar{ Labels: lset, Ts: sample[0].T() + int64(i), - Value: sample[0].V(), + Value: sample[0].F(), HasTs: true, } _, err = app.AppendExemplar(ref, lset, e) @@ -248,7 +248,7 @@ func TestRollback(t *testing.T) { for i := 0; i < numDatapoints; i++ { sample := tsdbutil.GenerateSamples(0, 1) - _, err := app.Append(0, lset, sample[0].T(), sample[0].V()) + _, err := app.Append(0, lset, sample[0].T(), sample[0].F()) require.NoError(t, err) } } diff --git a/tsdb/block_test.go b/tsdb/block_test.go index ab3399962..49a997fc5 100644 
--- a/tsdb/block_test.go +++ b/tsdb/block_test.go @@ -353,14 +353,14 @@ func TestReadIndexFormatV1(t *testing.T) { q, err := NewBlockQuerier(block, 0, 1000) require.NoError(t, err) require.Equal(t, query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")), - map[string][]tsdbutil.Sample{`{foo="bar"}`: {sample{t: 1, v: 2}}}) + map[string][]tsdbutil.Sample{`{foo="bar"}`: {sample{t: 1, f: 2}}}) q, err = NewBlockQuerier(block, 0, 1000) require.NoError(t, err) require.Equal(t, query(t, q, labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "^.?$")), map[string][]tsdbutil.Sample{ - `{foo="bar"}`: {sample{t: 1, v: 2}}, - `{foo="baz"}`: {sample{t: 3, v: 4}}, + `{foo="bar"}`: {sample{t: 1, f: 2}}, + `{foo="baz"}`: {sample{t: 3, f: 4}}, }) } @@ -568,7 +568,7 @@ func createHeadWithOOOSamples(tb testing.TB, w *wlog.WL, series []storage.Series count++ t, v := it.At() if count%oooSampleFrequency == 0 { - os = append(os, sample{t: t, v: v}) + os = append(os, sample{t: t, f: v}) continue } ref, err = app.Append(ref, lset, t, v) @@ -589,7 +589,7 @@ func createHeadWithOOOSamples(tb testing.TB, w *wlog.WL, series []storage.Series for i, lset := range oooSampleLabels { ref := storage.SeriesRef(0) for _, sample := range oooSamples[i] { - ref, err = app.Append(ref, lset, sample.T(), sample.V()) + ref, err = app.Append(ref, lset, sample.T(), sample.F()) require.NoError(tb, err) oooSamplesAppended++ } @@ -613,7 +613,7 @@ const ( // genSeries generates series of float64 samples with a given number of labels and values. 
func genSeries(totalSeries, labelCount int, mint, maxt int64) []storage.Series { return genSeriesFromSampleGenerator(totalSeries, labelCount, mint, maxt, 1, func(ts int64) tsdbutil.Sample { - return sample{t: ts, v: rand.Float64()} + return sample{t: ts, f: rand.Float64()} }) } @@ -657,7 +657,7 @@ func genHistogramAndFloatSeries(totalSeries, labelCount int, mint, maxt, step in count++ var s sample if floatSample { - s = sample{t: ts, v: rand.Float64()} + s = sample{t: ts, f: rand.Float64()} } else { h := &histogram.Histogram{ Count: 5 + uint64(ts*4), @@ -729,7 +729,7 @@ func populateSeries(lbls []map[string]string, mint, maxt int64) []storage.Series } samples := make([]tsdbutil.Sample, 0, maxt-mint+1) for t := mint; t <= maxt; t++ { - samples = append(samples, sample{t: t, v: rand.Float64()}) + samples = append(samples, sample{t: t, f: rand.Float64()}) } series = append(series, storage.NewListSeries(labels.FromMap(lbl), samples)) } diff --git a/tsdb/blockwriter_test.go b/tsdb/blockwriter_test.go index e6703b798..84ea8d51b 100644 --- a/tsdb/blockwriter_test.go +++ b/tsdb/blockwriter_test.go @@ -52,8 +52,8 @@ func TestBlockWriter(t *testing.T) { q, err := NewBlockQuerier(b, math.MinInt64, math.MaxInt64) require.NoError(t, err) series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "", ".*")) - sample1 := []tsdbutil.Sample{sample{t: ts1, v: v1}} - sample2 := []tsdbutil.Sample{sample{t: ts2, v: v2}} + sample1 := []tsdbutil.Sample{sample{t: ts1, f: v1}} + sample2 := []tsdbutil.Sample{sample{t: ts2, f: v2}} expectedSeries := map[string][]tsdbutil.Sample{"{a=\"b\"}": sample1, "{c=\"d\"}": sample2} require.Equal(t, expectedSeries, series) diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index 1b0684521..6a7e6ea68 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -975,7 +975,7 @@ func TestCompaction_populateBlock(t *testing.T) { s sample ) for iter.Next() == chunkenc.ValFloat { - s.t, s.v = iter.At() + s.t, s.f = iter.At() if firstTs == 
math.MaxInt64 { firstTs = s.t } @@ -1350,7 +1350,7 @@ func TestHeadCompactionWithHistograms(t *testing.T) { for tsMinute := from; tsMinute <= to; tsMinute++ { _, err := app.Append(0, lbls, minute(tsMinute), float64(tsMinute)) require.NoError(t, err) - *exp = append(*exp, sample{t: minute(tsMinute), v: float64(tsMinute)}) + *exp = append(*exp, sample{t: minute(tsMinute), f: float64(tsMinute)}) } require.NoError(t, app.Commit()) } diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 70639085e..c54fccf6f 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -104,7 +104,7 @@ func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[str switch typ { case chunkenc.ValFloat: ts, v := it.At() - samples = append(samples, sample{t: ts, v: v}) + samples = append(samples, sample{t: ts, f: v}) case chunkenc.ValHistogram: ts, h := it.AtHistogram() samples = append(samples, sample{t: ts, h: h}) @@ -233,7 +233,7 @@ func TestDataAvailableOnlyAfterCommit(t *testing.T) { seriesSet = query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) - require.Equal(t, map[string][]tsdbutil.Sample{`{foo="bar"}`: {sample{t: 0, v: 0}}}, seriesSet) + require.Equal(t, map[string][]tsdbutil.Sample{`{foo="bar"}`: {sample{t: 0, f: 0}}}, seriesSet) } // TestNoPanicAfterWALCorruption ensures that querying the db after a WAL corruption doesn't cause a panic. 
@@ -251,7 +251,7 @@ func TestNoPanicAfterWALCorruption(t *testing.T) { for i := 0; i < 121; i++ { app := db.Appender(ctx) _, err := app.Append(0, labels.FromStrings("foo", "bar"), maxt, 0) - expSamples = append(expSamples, sample{t: maxt, v: 0}) + expSamples = append(expSamples, sample{t: maxt, f: 0}) require.NoError(t, err) require.NoError(t, app.Commit()) maxt++ @@ -364,11 +364,11 @@ func TestDBAppenderAddRef(t *testing.T) { require.Equal(t, map[string][]tsdbutil.Sample{ labels.FromStrings("a", "b").String(): { - sample{t: 123, v: 0}, - sample{t: 124, v: 1}, - sample{t: 125, v: 0}, - sample{t: 133, v: 1}, - sample{t: 143, v: 2}, + sample{t: 123, f: 0}, + sample{t: 124, f: 1}, + sample{t: 125, f: 0}, + sample{t: 133, f: 1}, + sample{t: 143, f: 2}, }, }, res) } @@ -1740,7 +1740,7 @@ func expandSeriesSet(ss storage.SeriesSet) ([]labels.Labels, map[string][]sample it = series.Iterator(it) for it.Next() == chunkenc.ValFloat { t, v := it.At() - samples = append(samples, sample{t: t, v: v}) + samples = append(samples, sample{t: t, f: v}) } resultLabels = append(resultLabels, series.Labels()) resultSamples[series.Labels().String()] = samples @@ -2617,7 +2617,7 @@ func TestDBCannotSeePartialCommits(t *testing.T) { values := map[float64]struct{}{} for _, series := range seriesSet { - values[series[len(series)-1].v] = struct{}{} + values[series[len(series)-1].f] = struct{}{} } if len(values) != 1 { inconsistencies++ @@ -2693,7 +2693,7 @@ func TestDBQueryDoesntSeeAppendsAfterCreation(t *testing.T) { _, seriesSet, ws, err = expandSeriesSet(ss) require.NoError(t, err) require.Equal(t, 0, len(ws)) - require.Equal(t, map[string][]sample{`{foo="bar"}`: {{t: 0, v: 0}}}, seriesSet) + require.Equal(t, map[string][]sample{`{foo="bar"}`: {{t: 0, f: 0}}}, seriesSet) } // TestChunkWriter_ReadAfterWrite ensures that chunk segment are cut at the set segment size and @@ -4575,7 +4575,7 @@ func Test_Querier_OOOQuery(t *testing.T) { for min := fromMins; min <= toMins; min += 
time.Minute.Milliseconds() { _, err := app.Append(0, series1, min, float64(min)) if min >= queryMinT && min <= queryMaxT { - expSamples = append(expSamples, sample{t: min, v: float64(min)}) + expSamples = append(expSamples, sample{t: min, f: float64(min)}) } require.NoError(t, err) totalAppended++ @@ -4660,7 +4660,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) { for min := fromMins; min <= toMins; min += time.Minute.Milliseconds() { _, err := app.Append(0, series1, min, float64(min)) if min >= queryMinT && min <= queryMaxT { - expSamples = append(expSamples, sample{t: min, v: float64(min)}) + expSamples = append(expSamples, sample{t: min, f: float64(min)}) } require.NoError(t, err) totalAppended++ @@ -4730,7 +4730,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) { it := chunk.Chunk.Iterator(nil) for it.Next() == chunkenc.ValFloat { ts, v := it.At() - gotSamples = append(gotSamples, sample{t: ts, v: v}) + gotSamples = append(gotSamples, sample{t: ts, f: v}) } } require.Equal(t, expSamples, gotSamples) @@ -4766,7 +4766,7 @@ func TestOOOAppendAndQuery(t *testing.T) { require.Error(t, err) } else { require.NoError(t, err) - appendedSamples[key] = append(appendedSamples[key], sample{t: min, v: val}) + appendedSamples[key] = append(appendedSamples[key], sample{t: min, f: val}) totalSamples++ } } @@ -4889,7 +4889,7 @@ func TestOOODisabled(t *testing.T) { failedSamples++ } else { require.NoError(t, err) - expSamples[key] = append(expSamples[key], sample{t: min, v: val}) + expSamples[key] = append(expSamples[key], sample{t: min, f: val}) totalSamples++ } } @@ -4952,7 +4952,7 @@ func TestWBLAndMmapReplay(t *testing.T) { val := rand.Float64() _, err := app.Append(0, lbls, min, val) require.NoError(t, err) - expSamples[key] = append(expSamples[key], sample{t: min, v: val}) + expSamples[key] = append(expSamples[key], sample{t: min, f: val}) totalSamples++ } require.NoError(t, app.Commit()) @@ -4995,7 +4995,7 @@ func TestWBLAndMmapReplay(t *testing.T) { it := 
chk.Iterator(nil) for it.Next() == chunkenc.ValFloat { ts, val := it.At() - s1MmapSamples = append(s1MmapSamples, sample{t: ts, v: val}) + s1MmapSamples = append(s1MmapSamples, sample{t: ts, f: val}) } } require.Greater(t, len(s1MmapSamples), 0) @@ -5273,9 +5273,9 @@ func TestWBLCorruption(t *testing.T) { ts := min * time.Minute.Milliseconds() _, err := app.Append(0, series1, ts, float64(ts)) require.NoError(t, err) - allSamples = append(allSamples, sample{t: ts, v: float64(ts)}) + allSamples = append(allSamples, sample{t: ts, f: float64(ts)}) if afterRestart { - expAfterRestart = append(expAfterRestart, sample{t: ts, v: float64(ts)}) + expAfterRestart = append(expAfterRestart, sample{t: ts, f: float64(ts)}) } } require.NoError(t, app.Commit()) @@ -5419,9 +5419,9 @@ func TestOOOMmapCorruption(t *testing.T) { ts := min * time.Minute.Milliseconds() _, err := app.Append(0, series1, ts, float64(ts)) require.NoError(t, err) - allSamples = append(allSamples, sample{t: ts, v: float64(ts)}) + allSamples = append(allSamples, sample{t: ts, f: float64(ts)}) if inMmapAfterCorruption { - expInMmapChunks = append(expInMmapChunks, sample{t: ts, v: float64(ts)}) + expInMmapChunks = append(expInMmapChunks, sample{t: ts, f: float64(ts)}) } } require.NoError(t, app.Commit()) @@ -5555,7 +5555,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { _, err := app.Append(0, series1, ts, float64(ts)) if success { require.NoError(t, err) - allSamples = append(allSamples, sample{t: ts, v: float64(ts)}) + allSamples = append(allSamples, sample{t: ts, f: float64(ts)}) } else { require.Error(t, err) } @@ -5769,7 +5769,7 @@ func TestNoGapAfterRestartWithOOO(t *testing.T) { var expSamples []tsdbutil.Sample for min := fromMins; min <= toMins; min++ { ts := min * time.Minute.Milliseconds() - expSamples = append(expSamples, sample{t: ts, v: float64(ts)}) + expSamples = append(expSamples, sample{t: ts, f: float64(ts)}) } expRes := map[string][]tsdbutil.Sample{ @@ -5876,7 +5876,7 @@ func 
TestWblReplayAfterOOODisableAndRestart(t *testing.T) { ts := min * time.Minute.Milliseconds() _, err := app.Append(0, series1, ts, float64(ts)) require.NoError(t, err) - allSamples = append(allSamples, sample{t: ts, v: float64(ts)}) + allSamples = append(allSamples, sample{t: ts, f: float64(ts)}) } require.NoError(t, app.Commit()) } @@ -5935,7 +5935,7 @@ func TestPanicOnApplyConfig(t *testing.T) { ts := min * time.Minute.Milliseconds() _, err := app.Append(0, series1, ts, float64(ts)) require.NoError(t, err) - allSamples = append(allSamples, sample{t: ts, v: float64(ts)}) + allSamples = append(allSamples, sample{t: ts, f: float64(ts)}) } require.NoError(t, app.Commit()) } @@ -5983,7 +5983,7 @@ func TestDiskFillingUpAfterDisablingOOO(t *testing.T) { ts := min * time.Minute.Milliseconds() _, err := app.Append(0, series1, ts, float64(ts)) require.NoError(t, err) - allSamples = append(allSamples, sample{t: ts, v: float64(ts)}) + allSamples = append(allSamples, sample{t: ts, f: float64(ts)}) } require.NoError(t, app.Commit()) } @@ -6090,7 +6090,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { _, err := app.Append(0, lbls, minute(tsMinute), val) require.NoError(t, err) require.NoError(t, app.Commit()) - *exp = append(*exp, sample{t: minute(tsMinute), v: val}) + *exp = append(*exp, sample{t: minute(tsMinute), f: val}) } testQuery := func(name, value string, exp map[string][]tsdbutil.Sample) { @@ -6346,7 +6346,7 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) { switch typ { case chunkenc.ValFloat: ts, v := it.At() - slice = append(slice, sample{t: ts, v: v}) + slice = append(slice, sample{t: ts, f: v}) case chunkenc.ValHistogram: ts, h := it.AtHistogram() slice = append(slice, sample{t: ts, h: h}) @@ -6418,7 +6418,7 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) { testBlockQuerying(t, genHistogramSeries(10, 5, minute(0), minute(119), minute(1), floatHistogram), genSeriesFromSampleGenerator(10, 5, 
minute(120), minute(239), minute(1), func(ts int64) tsdbutil.Sample { - return sample{t: ts, v: rand.Float64()} + return sample{t: ts, f: rand.Float64()} }), genHistogramSeries(10, 5, minute(240), minute(359), minute(1), floatHistogram), ) @@ -6430,7 +6430,7 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) { genHistogramSeries(10, 5, minute(61), minute(120), minute(1), floatHistogram), genHistogramAndFloatSeries(10, 5, minute(121), minute(180), minute(1), floatHistogram), genSeriesFromSampleGenerator(10, 5, minute(181), minute(240), minute(1), func(ts int64) tsdbutil.Sample { - return sample{t: ts, v: rand.Float64()} + return sample{t: ts, f: rand.Float64()} }), ) }) @@ -6447,7 +6447,7 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) { testBlockQuerying(t, genHistogramSeries(10, 5, minute(0), minute(120), minute(3), floatHistogram), genSeriesFromSampleGenerator(10, 5, minute(1), minute(120), minute(3), func(ts int64) tsdbutil.Sample { - return sample{t: ts, v: rand.Float64()} + return sample{t: ts, f: rand.Float64()} }), genHistogramSeries(10, 5, minute(2), minute(120), minute(3), floatHistogram), ) @@ -6459,7 +6459,7 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) { genHistogramSeries(10, 5, minute(46), minute(100), minute(3), floatHistogram), genHistogramAndFloatSeries(10, 5, minute(89), minute(140), minute(3), floatHistogram), genSeriesFromSampleGenerator(10, 5, minute(126), minute(200), minute(3), func(ts int64) tsdbutil.Sample { - return sample{t: ts, v: rand.Float64()} + return sample{t: ts, f: rand.Float64()} }), ) }) diff --git a/tsdb/head.go b/tsdb/head.go index af8175cd0..4696884f2 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -1864,7 +1864,7 @@ func (s *stripeSeries) getOrSet(hash uint64, lset labels.Labels, createSeries fu type sample struct { t int64 - v float64 + f float64 h *histogram.Histogram fh *histogram.FloatHistogram } @@ -1874,7 +1874,7 @@ func newSample(t int64, v float64, h 
*histogram.Histogram, fh *histogram.FloatHi } func (s sample) T() int64 { return s.t } -func (s sample) V() float64 { return s.v } +func (s sample) F() float64 { return s.f } func (s sample) H() *histogram.Histogram { return s.h } func (s sample) FH() *histogram.FloatHistogram { return s.fh } diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 0a2a2ee6f..e80c197b2 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -465,8 +465,8 @@ func TestHead_HighConcurrencyReadAndWrite(t *testing.T) { if sample.T() != int64(expectedValue) { return false, fmt.Errorf("expected sample %d to have ts %d, got %d", sampleIdx, expectedValue, sample.T()) } - if sample.V() != float64(expectedValue) { - return false, fmt.Errorf("expected sample %d to have value %d, got %f", sampleIdx, expectedValue, sample.V()) + if sample.F() != float64(expectedValue) { + return false, fmt.Errorf("expected sample %d to have value %d, got %f", sampleIdx, expectedValue, sample.F()) } } @@ -575,7 +575,7 @@ func TestHead_ReadWAL(t *testing.T) { expandChunk := func(c chunkenc.Iterator) (x []sample) { for c.Next() == chunkenc.ValFloat { t, v := c.At() - x = append(x, sample{t: t, v: v}) + x = append(x, sample{t: t, f: v}) } require.NoError(t, c.Err()) return x @@ -870,7 +870,7 @@ func TestHeadDeleteSimple(t *testing.T) { buildSmpls := func(s []int64) []sample { ss := make([]sample, 0, len(s)) for _, t := range s { - ss = append(ss, sample{t: t, v: float64(t)}) + ss = append(ss, sample{t: t, f: float64(t)}) } return ss } @@ -925,7 +925,7 @@ func TestHeadDeleteSimple(t *testing.T) { app := head.Appender(context.Background()) for _, smpl := range smplsAll { - _, err := app.Append(0, lblsDefault, smpl.t, smpl.v) + _, err := app.Append(0, lblsDefault, smpl.t, smpl.f) require.NoError(t, err) } @@ -939,7 +939,7 @@ func TestHeadDeleteSimple(t *testing.T) { // Add more samples. 
app = head.Appender(context.Background()) for _, smpl := range c.addSamples { - _, err := app.Append(0, lblsDefault, smpl.t, smpl.v) + _, err := app.Append(0, lblsDefault, smpl.t, smpl.f) require.NoError(t, err) } @@ -1924,7 +1924,7 @@ func TestMemSeriesIsolation(t *testing.T) { require.Equal(t, 0, len(ws)) for _, series := range seriesSet { - return int(series[len(series)-1].v) + return int(series[len(series)-1].f) } return -1 } @@ -3088,7 +3088,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { ts++ _, err := app.Append(0, s2, int64(ts), float64(ts)) require.NoError(t, err) - exp[k2] = append(exp[k2], sample{t: int64(ts), v: float64(ts)}) + exp[k2] = append(exp[k2], sample{t: int64(ts), f: float64(ts)}) } require.NoError(t, app.Commit()) app = head.Appender(context.Background()) @@ -3125,7 +3125,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { ts++ _, err := app.Append(0, s2, int64(ts), float64(ts)) require.NoError(t, err) - exp[k2] = append(exp[k2], sample{t: int64(ts), v: float64(ts)}) + exp[k2] = append(exp[k2], sample{t: int64(ts), f: float64(ts)}) } require.NoError(t, app.Commit()) app = head.Appender(context.Background()) @@ -3812,7 +3812,7 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) { expChunks: 1, }, { - samples: []tsdbutil.Sample{sample{t: 200, v: 2}}, + samples: []tsdbutil.Sample{sample{t: 200, f: 2}}, expChunks: 2, }, { @@ -3836,7 +3836,7 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) { expChunks: 6, }, { - samples: []tsdbutil.Sample{sample{t: 100, v: 2}}, + samples: []tsdbutil.Sample{sample{t: 100, f: 2}}, err: storage.ErrOutOfOrderSample, }, { @@ -3847,13 +3847,13 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) { // Combination of histograms and float64 in the same commit. The behaviour is undefined, but we want to also // verify how TSDB would behave. Here the histogram is appended at the end, hence will be considered as out of order. 
samples: []tsdbutil.Sample{ - sample{t: 400, v: 4}, + sample{t: 400, f: 4}, sample{t: 500, h: hists[5]}, // This won't be committed. - sample{t: 600, v: 6}, + sample{t: 600, f: 6}, }, addToExp: []tsdbutil.Sample{ - sample{t: 400, v: 4}, - sample{t: 600, v: 6}, + sample{t: 400, f: 4}, + sample{t: 600, f: 6}, }, expChunks: 7, // Only 1 new chunk for float64. }, @@ -3861,11 +3861,11 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) { // Here the histogram is appended at the end, hence the first histogram is out of order. samples: []tsdbutil.Sample{ sample{t: 700, h: hists[7]}, // Out of order w.r.t. the next float64 sample that is appended first. - sample{t: 800, v: 8}, + sample{t: 800, f: 8}, sample{t: 900, h: hists[9]}, }, addToExp: []tsdbutil.Sample{ - sample{t: 800, v: 8}, + sample{t: 800, f: 8}, sample{t: 900, h: hists[9].Copy()}, }, expChunks: 8, // float64 added to old chunk, only 1 new for histograms. @@ -3890,7 +3890,7 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) { if s.H() != nil || s.FH() != nil { _, err = app.AppendHistogram(0, lbls, s.T(), s.H(), s.FH()) } else { - _, err = app.Append(0, lbls, s.T(), s.V()) + _, err = app.Append(0, lbls, s.T(), s.F()) } require.Equal(t, a.err, err) } @@ -4056,7 +4056,7 @@ func TestOOOWalReplay(t *testing.T) { require.NoError(t, app.Commit()) if isOOO { - expOOOSamples = append(expOOOSamples, sample{t: ts, v: v}) + expOOOSamples = append(expOOOSamples, sample{t: ts, f: v}) } } @@ -4100,7 +4100,7 @@ func TestOOOWalReplay(t *testing.T) { actOOOSamples := make([]sample, 0, len(expOOOSamples)) for it.Next() == chunkenc.ValFloat { ts, v := it.At() - actOOOSamples = append(actOOOSamples, sample{t: ts, v: v}) + actOOOSamples = append(actOOOSamples, sample{t: ts, f: v}) } // OOO chunk will be sorted. Hence sort the expected samples. 
@@ -4360,7 +4360,7 @@ func TestReplayAfterMmapReplayError(t *testing.T) { var ref storage.SeriesRef for i := 0; i < numSamples; i++ { ref, err = app.Append(ref, lbls, lastTs, float64(lastTs)) - expSamples = append(expSamples, sample{t: lastTs, v: float64(lastTs)}) + expSamples = append(expSamples, sample{t: lastTs, f: float64(lastTs)}) require.NoError(t, err) lastTs += itvl if i%10 == 0 { diff --git a/tsdb/ooo_head.go b/tsdb/ooo_head.go index 63d0b3712..45827889e 100644 --- a/tsdb/ooo_head.go +++ b/tsdb/ooo_head.go @@ -78,7 +78,7 @@ func (o *OOOChunk) ToXOR() (*chunkenc.XORChunk, error) { return nil, err } for _, s := range o.samples { - app.Append(s.t, s.v) + app.Append(s.t, s.f) } return x, nil } @@ -96,7 +96,7 @@ func (o *OOOChunk) ToXORBetweenTimestamps(mint, maxt int64) (*chunkenc.XORChunk, if s.t > maxt { break } - app.Append(s.t, s.v) + app.Append(s.t, s.f) } return x, nil } diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index 177bd2326..f3ec862f5 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -504,8 +504,8 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { queryMaxT: minutes(100), firstInOrderSampleAt: minutes(120), inputSamples: tsdbutil.SampleSlice{ - sample{t: minutes(30), v: float64(0)}, - sample{t: minutes(40), v: float64(0)}, + sample{t: minutes(30), f: float64(0)}, + sample{t: minutes(40), f: float64(0)}, }, expChunkError: false, // ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100 @@ -514,8 +514,8 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { // Output Graphically [--------] (With 2 samples) expChunksSamples: []tsdbutil.SampleSlice{ { - sample{t: minutes(30), v: float64(0)}, - sample{t: minutes(40), v: float64(0)}, + sample{t: minutes(30), f: float64(0)}, + sample{t: minutes(40), f: float64(0)}, }, }, }, @@ -526,15 +526,15 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { firstInOrderSampleAt: minutes(120), inputSamples: tsdbutil.SampleSlice{ // opts.OOOCapMax is 5 so these will be 
mmapped to the first mmapped chunk - sample{t: minutes(41), v: float64(0)}, - sample{t: minutes(42), v: float64(0)}, - sample{t: minutes(43), v: float64(0)}, - sample{t: minutes(44), v: float64(0)}, - sample{t: minutes(45), v: float64(0)}, + sample{t: minutes(41), f: float64(0)}, + sample{t: minutes(42), f: float64(0)}, + sample{t: minutes(43), f: float64(0)}, + sample{t: minutes(44), f: float64(0)}, + sample{t: minutes(45), f: float64(0)}, // The following samples will go to the head chunk, and we want it // to overlap with the previous chunk - sample{t: minutes(30), v: float64(1)}, - sample{t: minutes(50), v: float64(1)}, + sample{t: minutes(30), f: float64(1)}, + sample{t: minutes(50), f: float64(1)}, }, expChunkError: false, // ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100 @@ -544,13 +544,13 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { // Output Graphically [-----------------] (With 7 samples) expChunksSamples: []tsdbutil.SampleSlice{ { - sample{t: minutes(30), v: float64(1)}, - sample{t: minutes(41), v: float64(0)}, - sample{t: minutes(42), v: float64(0)}, - sample{t: minutes(43), v: float64(0)}, - sample{t: minutes(44), v: float64(0)}, - sample{t: minutes(45), v: float64(0)}, - sample{t: minutes(50), v: float64(1)}, + sample{t: minutes(30), f: float64(1)}, + sample{t: minutes(41), f: float64(0)}, + sample{t: minutes(42), f: float64(0)}, + sample{t: minutes(43), f: float64(0)}, + sample{t: minutes(44), f: float64(0)}, + sample{t: minutes(45), f: float64(0)}, + sample{t: minutes(50), f: float64(1)}, }, }, }, @@ -561,26 +561,26 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { firstInOrderSampleAt: minutes(120), inputSamples: tsdbutil.SampleSlice{ // Chunk 0 - sample{t: minutes(10), v: float64(0)}, - sample{t: minutes(12), v: float64(0)}, - sample{t: minutes(14), v: float64(0)}, - sample{t: minutes(16), v: float64(0)}, - sample{t: minutes(20), v: float64(0)}, + sample{t: minutes(10), f: float64(0)}, + sample{t: minutes(12), f: float64(0)}, + 
sample{t: minutes(14), f: float64(0)}, + sample{t: minutes(16), f: float64(0)}, + sample{t: minutes(20), f: float64(0)}, // Chunk 1 - sample{t: minutes(20), v: float64(1)}, - sample{t: minutes(22), v: float64(1)}, - sample{t: minutes(24), v: float64(1)}, - sample{t: minutes(26), v: float64(1)}, - sample{t: minutes(29), v: float64(1)}, + sample{t: minutes(20), f: float64(1)}, + sample{t: minutes(22), f: float64(1)}, + sample{t: minutes(24), f: float64(1)}, + sample{t: minutes(26), f: float64(1)}, + sample{t: minutes(29), f: float64(1)}, // Chunk 2 - sample{t: minutes(30), v: float64(2)}, - sample{t: minutes(32), v: float64(2)}, - sample{t: minutes(34), v: float64(2)}, - sample{t: minutes(36), v: float64(2)}, - sample{t: minutes(40), v: float64(2)}, + sample{t: minutes(30), f: float64(2)}, + sample{t: minutes(32), f: float64(2)}, + sample{t: minutes(34), f: float64(2)}, + sample{t: minutes(36), f: float64(2)}, + sample{t: minutes(40), f: float64(2)}, // Head - sample{t: minutes(40), v: float64(3)}, - sample{t: minutes(50), v: float64(3)}, + sample{t: minutes(40), f: float64(3)}, + sample{t: minutes(50), f: float64(3)}, }, expChunkError: false, // ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100 @@ -592,23 +592,23 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { // Output Graphically [----------------][-----------------] expChunksSamples: []tsdbutil.SampleSlice{ { - sample{t: minutes(10), v: float64(0)}, - sample{t: minutes(12), v: float64(0)}, - sample{t: minutes(14), v: float64(0)}, - sample{t: minutes(16), v: float64(0)}, - sample{t: minutes(20), v: float64(1)}, - sample{t: minutes(22), v: float64(1)}, - sample{t: minutes(24), v: float64(1)}, - sample{t: minutes(26), v: float64(1)}, - sample{t: minutes(29), v: float64(1)}, + sample{t: minutes(10), f: float64(0)}, + sample{t: minutes(12), f: float64(0)}, + sample{t: minutes(14), f: float64(0)}, + sample{t: minutes(16), f: float64(0)}, + sample{t: minutes(20), f: float64(1)}, + sample{t: minutes(22), f: 
float64(1)}, + sample{t: minutes(24), f: float64(1)}, + sample{t: minutes(26), f: float64(1)}, + sample{t: minutes(29), f: float64(1)}, }, { - sample{t: minutes(30), v: float64(2)}, - sample{t: minutes(32), v: float64(2)}, - sample{t: minutes(34), v: float64(2)}, - sample{t: minutes(36), v: float64(2)}, - sample{t: minutes(40), v: float64(3)}, - sample{t: minutes(50), v: float64(3)}, + sample{t: minutes(30), f: float64(2)}, + sample{t: minutes(32), f: float64(2)}, + sample{t: minutes(34), f: float64(2)}, + sample{t: minutes(36), f: float64(2)}, + sample{t: minutes(40), f: float64(3)}, + sample{t: minutes(50), f: float64(3)}, }, }, }, @@ -619,26 +619,26 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { firstInOrderSampleAt: minutes(120), inputSamples: tsdbutil.SampleSlice{ // Chunk 0 - sample{t: minutes(40), v: float64(0)}, - sample{t: minutes(42), v: float64(0)}, - sample{t: minutes(44), v: float64(0)}, - sample{t: minutes(46), v: float64(0)}, - sample{t: minutes(50), v: float64(0)}, + sample{t: minutes(40), f: float64(0)}, + sample{t: minutes(42), f: float64(0)}, + sample{t: minutes(44), f: float64(0)}, + sample{t: minutes(46), f: float64(0)}, + sample{t: minutes(50), f: float64(0)}, // Chunk 1 - sample{t: minutes(30), v: float64(1)}, - sample{t: minutes(32), v: float64(1)}, - sample{t: minutes(34), v: float64(1)}, - sample{t: minutes(36), v: float64(1)}, - sample{t: minutes(40), v: float64(1)}, + sample{t: minutes(30), f: float64(1)}, + sample{t: minutes(32), f: float64(1)}, + sample{t: minutes(34), f: float64(1)}, + sample{t: minutes(36), f: float64(1)}, + sample{t: minutes(40), f: float64(1)}, // Chunk 2 - sample{t: minutes(20), v: float64(2)}, - sample{t: minutes(22), v: float64(2)}, - sample{t: minutes(24), v: float64(2)}, - sample{t: minutes(26), v: float64(2)}, - sample{t: minutes(29), v: float64(2)}, + sample{t: minutes(20), f: float64(2)}, + sample{t: minutes(22), f: float64(2)}, + sample{t: minutes(24), f: float64(2)}, + sample{t: minutes(26), f: 
float64(2)}, + sample{t: minutes(29), f: float64(2)}, // Head - sample{t: minutes(10), v: float64(3)}, - sample{t: minutes(20), v: float64(3)}, + sample{t: minutes(10), f: float64(3)}, + sample{t: minutes(20), f: float64(3)}, }, expChunkError: false, // ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100 @@ -650,23 +650,23 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { // Output Graphically [----------------][-----------------] expChunksSamples: []tsdbutil.SampleSlice{ { - sample{t: minutes(10), v: float64(3)}, - sample{t: minutes(20), v: float64(2)}, - sample{t: minutes(22), v: float64(2)}, - sample{t: minutes(24), v: float64(2)}, - sample{t: minutes(26), v: float64(2)}, - sample{t: minutes(29), v: float64(2)}, + sample{t: minutes(10), f: float64(3)}, + sample{t: minutes(20), f: float64(2)}, + sample{t: minutes(22), f: float64(2)}, + sample{t: minutes(24), f: float64(2)}, + sample{t: minutes(26), f: float64(2)}, + sample{t: minutes(29), f: float64(2)}, }, { - sample{t: minutes(30), v: float64(1)}, - sample{t: minutes(32), v: float64(1)}, - sample{t: minutes(34), v: float64(1)}, - sample{t: minutes(36), v: float64(1)}, - sample{t: minutes(40), v: float64(0)}, - sample{t: minutes(42), v: float64(0)}, - sample{t: minutes(44), v: float64(0)}, - sample{t: minutes(46), v: float64(0)}, - sample{t: minutes(50), v: float64(0)}, + sample{t: minutes(30), f: float64(1)}, + sample{t: minutes(32), f: float64(1)}, + sample{t: minutes(34), f: float64(1)}, + sample{t: minutes(36), f: float64(1)}, + sample{t: minutes(40), f: float64(0)}, + sample{t: minutes(42), f: float64(0)}, + sample{t: minutes(44), f: float64(0)}, + sample{t: minutes(46), f: float64(0)}, + sample{t: minutes(50), f: float64(0)}, }, }, }, @@ -677,26 +677,26 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { firstInOrderSampleAt: minutes(120), inputSamples: tsdbutil.SampleSlice{ // Chunk 0 - sample{t: minutes(10), v: float64(0)}, - sample{t: minutes(12), v: float64(0)}, - sample{t: minutes(14), v: 
float64(0)}, - sample{t: minutes(16), v: float64(0)}, - sample{t: minutes(18), v: float64(0)}, + sample{t: minutes(10), f: float64(0)}, + sample{t: minutes(12), f: float64(0)}, + sample{t: minutes(14), f: float64(0)}, + sample{t: minutes(16), f: float64(0)}, + sample{t: minutes(18), f: float64(0)}, // Chunk 1 - sample{t: minutes(20), v: float64(1)}, - sample{t: minutes(22), v: float64(1)}, - sample{t: minutes(24), v: float64(1)}, - sample{t: minutes(26), v: float64(1)}, - sample{t: minutes(28), v: float64(1)}, + sample{t: minutes(20), f: float64(1)}, + sample{t: minutes(22), f: float64(1)}, + sample{t: minutes(24), f: float64(1)}, + sample{t: minutes(26), f: float64(1)}, + sample{t: minutes(28), f: float64(1)}, // Chunk 2 - sample{t: minutes(30), v: float64(2)}, - sample{t: minutes(32), v: float64(2)}, - sample{t: minutes(34), v: float64(2)}, - sample{t: minutes(36), v: float64(2)}, - sample{t: minutes(38), v: float64(2)}, + sample{t: minutes(30), f: float64(2)}, + sample{t: minutes(32), f: float64(2)}, + sample{t: minutes(34), f: float64(2)}, + sample{t: minutes(36), f: float64(2)}, + sample{t: minutes(38), f: float64(2)}, // Head - sample{t: minutes(40), v: float64(3)}, - sample{t: minutes(42), v: float64(3)}, + sample{t: minutes(40), f: float64(3)}, + sample{t: minutes(42), f: float64(3)}, }, expChunkError: false, // ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100 @@ -708,29 +708,29 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { // Output Graphically [-------][-------][-------][--------] expChunksSamples: []tsdbutil.SampleSlice{ { - sample{t: minutes(10), v: float64(0)}, - sample{t: minutes(12), v: float64(0)}, - sample{t: minutes(14), v: float64(0)}, - sample{t: minutes(16), v: float64(0)}, - sample{t: minutes(18), v: float64(0)}, + sample{t: minutes(10), f: float64(0)}, + sample{t: minutes(12), f: float64(0)}, + sample{t: minutes(14), f: float64(0)}, + sample{t: minutes(16), f: float64(0)}, + sample{t: minutes(18), f: float64(0)}, }, { - sample{t: 
minutes(20), v: float64(1)}, - sample{t: minutes(22), v: float64(1)}, - sample{t: minutes(24), v: float64(1)}, - sample{t: minutes(26), v: float64(1)}, - sample{t: minutes(28), v: float64(1)}, + sample{t: minutes(20), f: float64(1)}, + sample{t: minutes(22), f: float64(1)}, + sample{t: minutes(24), f: float64(1)}, + sample{t: minutes(26), f: float64(1)}, + sample{t: minutes(28), f: float64(1)}, }, { - sample{t: minutes(30), v: float64(2)}, - sample{t: minutes(32), v: float64(2)}, - sample{t: minutes(34), v: float64(2)}, - sample{t: minutes(36), v: float64(2)}, - sample{t: minutes(38), v: float64(2)}, + sample{t: minutes(30), f: float64(2)}, + sample{t: minutes(32), f: float64(2)}, + sample{t: minutes(34), f: float64(2)}, + sample{t: minutes(36), f: float64(2)}, + sample{t: minutes(38), f: float64(2)}, }, { - sample{t: minutes(40), v: float64(3)}, - sample{t: minutes(42), v: float64(3)}, + sample{t: minutes(40), f: float64(3)}, + sample{t: minutes(42), f: float64(3)}, }, }, }, @@ -741,20 +741,20 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { firstInOrderSampleAt: minutes(120), inputSamples: tsdbutil.SampleSlice{ // Chunk 0 - sample{t: minutes(10), v: float64(0)}, - sample{t: minutes(15), v: float64(0)}, - sample{t: minutes(20), v: float64(0)}, - sample{t: minutes(25), v: float64(0)}, - sample{t: minutes(30), v: float64(0)}, + sample{t: minutes(10), f: float64(0)}, + sample{t: minutes(15), f: float64(0)}, + sample{t: minutes(20), f: float64(0)}, + sample{t: minutes(25), f: float64(0)}, + sample{t: minutes(30), f: float64(0)}, // Chunk 1 - sample{t: minutes(20), v: float64(1)}, - sample{t: minutes(25), v: float64(1)}, - sample{t: minutes(30), v: float64(1)}, - sample{t: minutes(35), v: float64(1)}, - sample{t: minutes(42), v: float64(1)}, + sample{t: minutes(20), f: float64(1)}, + sample{t: minutes(25), f: float64(1)}, + sample{t: minutes(30), f: float64(1)}, + sample{t: minutes(35), f: float64(1)}, + sample{t: minutes(42), f: float64(1)}, // Chunk 2 Head - 
sample{t: minutes(32), v: float64(2)}, - sample{t: minutes(50), v: float64(2)}, + sample{t: minutes(32), f: float64(2)}, + sample{t: minutes(50), f: float64(2)}, }, expChunkError: false, // ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100 @@ -765,15 +765,15 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { // Output Graphically [-----------------------------------] expChunksSamples: []tsdbutil.SampleSlice{ { - sample{t: minutes(10), v: float64(0)}, - sample{t: minutes(15), v: float64(0)}, - sample{t: minutes(20), v: float64(1)}, - sample{t: minutes(25), v: float64(1)}, - sample{t: minutes(30), v: float64(1)}, - sample{t: minutes(32), v: float64(2)}, - sample{t: minutes(35), v: float64(1)}, - sample{t: minutes(42), v: float64(1)}, - sample{t: minutes(50), v: float64(2)}, + sample{t: minutes(10), f: float64(0)}, + sample{t: minutes(15), f: float64(0)}, + sample{t: minutes(20), f: float64(1)}, + sample{t: minutes(25), f: float64(1)}, + sample{t: minutes(30), f: float64(1)}, + sample{t: minutes(32), f: float64(2)}, + sample{t: minutes(35), f: float64(1)}, + sample{t: minutes(42), f: float64(1)}, + sample{t: minutes(50), f: float64(2)}, }, }, }, @@ -784,20 +784,20 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { firstInOrderSampleAt: minutes(120), inputSamples: tsdbutil.SampleSlice{ // Chunk 0 - sample{t: minutes(10), v: float64(0)}, - sample{t: minutes(15), v: float64(0)}, - sample{t: minutes(20), v: float64(0)}, - sample{t: minutes(25), v: float64(0)}, - sample{t: minutes(30), v: float64(0)}, + sample{t: minutes(10), f: float64(0)}, + sample{t: minutes(15), f: float64(0)}, + sample{t: minutes(20), f: float64(0)}, + sample{t: minutes(25), f: float64(0)}, + sample{t: minutes(30), f: float64(0)}, // Chunk 1 - sample{t: minutes(20), v: float64(1)}, - sample{t: minutes(25), v: float64(1)}, - sample{t: minutes(30), v: float64(1)}, - sample{t: minutes(35), v: float64(1)}, - sample{t: minutes(42), v: float64(1)}, + sample{t: minutes(20), f: float64(1)}, + sample{t: 
minutes(25), f: float64(1)}, + sample{t: minutes(30), f: float64(1)}, + sample{t: minutes(35), f: float64(1)}, + sample{t: minutes(42), f: float64(1)}, // Chunk 2 Head - sample{t: minutes(32), v: float64(2)}, - sample{t: minutes(50), v: float64(2)}, + sample{t: minutes(32), f: float64(2)}, + sample{t: minutes(50), f: float64(2)}, }, expChunkError: false, // ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100 @@ -808,15 +808,15 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { // Output Graphically [-----------------------------------] expChunksSamples: []tsdbutil.SampleSlice{ { - sample{t: minutes(10), v: float64(0)}, - sample{t: minutes(15), v: float64(0)}, - sample{t: minutes(20), v: float64(1)}, - sample{t: minutes(25), v: float64(1)}, - sample{t: minutes(30), v: float64(1)}, - sample{t: minutes(32), v: float64(2)}, - sample{t: minutes(35), v: float64(1)}, - sample{t: minutes(42), v: float64(1)}, - sample{t: minutes(50), v: float64(2)}, + sample{t: minutes(10), f: float64(0)}, + sample{t: minutes(15), f: float64(0)}, + sample{t: minutes(20), f: float64(1)}, + sample{t: minutes(25), f: float64(1)}, + sample{t: minutes(30), f: float64(1)}, + sample{t: minutes(32), f: float64(2)}, + sample{t: minutes(35), f: float64(1)}, + sample{t: minutes(42), f: float64(1)}, + sample{t: minutes(50), f: float64(2)}, }, }, }, @@ -833,7 +833,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { // OOO few samples for s1. 
app = db.Appender(context.Background()) for _, s := range tc.inputSamples { - appendSample(app, s1, s.T(), s.V()) + appendSample(app, s1, s.T(), s.F()) } require.NoError(t, app.Commit()) @@ -855,7 +855,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { it := c.Iterator(nil) for it.Next() == chunkenc.ValFloat { t, v := it.At() - resultSamples = append(resultSamples, sample{t: t, v: v}) + resultSamples = append(resultSamples, sample{t: t, f: v}) } require.Equal(t, tc.expChunksSamples[i], resultSamples) } @@ -902,19 +902,19 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( firstInOrderSampleAt: minutes(120), initialSamples: tsdbutil.SampleSlice{ // Chunk 0 - sample{t: minutes(20), v: float64(0)}, - sample{t: minutes(22), v: float64(0)}, - sample{t: minutes(24), v: float64(0)}, - sample{t: minutes(26), v: float64(0)}, - sample{t: minutes(30), v: float64(0)}, + sample{t: minutes(20), f: float64(0)}, + sample{t: minutes(22), f: float64(0)}, + sample{t: minutes(24), f: float64(0)}, + sample{t: minutes(26), f: float64(0)}, + sample{t: minutes(30), f: float64(0)}, // Chunk 1 Head - sample{t: minutes(25), v: float64(1)}, - sample{t: minutes(35), v: float64(1)}, + sample{t: minutes(25), f: float64(1)}, + sample{t: minutes(35), f: float64(1)}, }, samplesAfterSeriesCall: tsdbutil.SampleSlice{ - sample{t: minutes(10), v: float64(1)}, - sample{t: minutes(32), v: float64(1)}, - sample{t: minutes(50), v: float64(1)}, + sample{t: minutes(10), f: float64(1)}, + sample{t: minutes(32), f: float64(1)}, + sample{t: minutes(50), f: float64(1)}, }, expChunkError: false, // ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100 @@ -926,14 +926,14 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( // Output Graphically [------------] (With 8 samples, samples newer than lastmint or older than lastmaxt are omitted but the ones in between are kept) expChunksSamples: []tsdbutil.SampleSlice{ { - sample{t: minutes(20), v: 
float64(0)}, - sample{t: minutes(22), v: float64(0)}, - sample{t: minutes(24), v: float64(0)}, - sample{t: minutes(25), v: float64(1)}, - sample{t: minutes(26), v: float64(0)}, - sample{t: minutes(30), v: float64(0)}, - sample{t: minutes(32), v: float64(1)}, // This sample was added after Series() but before Chunk() and its in between the lastmint and maxt so it should be kept - sample{t: minutes(35), v: float64(1)}, + sample{t: minutes(20), f: float64(0)}, + sample{t: minutes(22), f: float64(0)}, + sample{t: minutes(24), f: float64(0)}, + sample{t: minutes(25), f: float64(1)}, + sample{t: minutes(26), f: float64(0)}, + sample{t: minutes(30), f: float64(0)}, + sample{t: minutes(32), f: float64(1)}, // This sample was added after Series() but before Chunk() and its in between the lastmint and maxt so it should be kept + sample{t: minutes(35), f: float64(1)}, }, }, }, @@ -944,22 +944,22 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( firstInOrderSampleAt: minutes(120), initialSamples: tsdbutil.SampleSlice{ // Chunk 0 - sample{t: minutes(20), v: float64(0)}, - sample{t: minutes(22), v: float64(0)}, - sample{t: minutes(24), v: float64(0)}, - sample{t: minutes(26), v: float64(0)}, - sample{t: minutes(30), v: float64(0)}, + sample{t: minutes(20), f: float64(0)}, + sample{t: minutes(22), f: float64(0)}, + sample{t: minutes(24), f: float64(0)}, + sample{t: minutes(26), f: float64(0)}, + sample{t: minutes(30), f: float64(0)}, // Chunk 1 Head - sample{t: minutes(25), v: float64(1)}, - sample{t: minutes(35), v: float64(1)}, + sample{t: minutes(25), f: float64(1)}, + sample{t: minutes(35), f: float64(1)}, }, samplesAfterSeriesCall: tsdbutil.SampleSlice{ - sample{t: minutes(10), v: float64(1)}, - sample{t: minutes(32), v: float64(1)}, - sample{t: minutes(50), v: float64(1)}, + sample{t: minutes(10), f: float64(1)}, + sample{t: minutes(32), f: float64(1)}, + sample{t: minutes(50), f: float64(1)}, // Chunk 1 gets mmapped and Chunk 2, the new 
head is born - sample{t: minutes(25), v: float64(2)}, - sample{t: minutes(31), v: float64(2)}, + sample{t: minutes(25), f: float64(2)}, + sample{t: minutes(31), f: float64(2)}, }, expChunkError: false, // ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100 @@ -972,14 +972,14 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( // Output Graphically [------------] (8 samples) It has 5 from Chunk 0 and 3 from Chunk 1 expChunksSamples: []tsdbutil.SampleSlice{ { - sample{t: minutes(20), v: float64(0)}, - sample{t: minutes(22), v: float64(0)}, - sample{t: minutes(24), v: float64(0)}, - sample{t: minutes(25), v: float64(1)}, - sample{t: minutes(26), v: float64(0)}, - sample{t: minutes(30), v: float64(0)}, - sample{t: minutes(32), v: float64(1)}, // This sample was added after Series() but before Chunk() and its in between the lastmint and maxt so it should be kept - sample{t: minutes(35), v: float64(1)}, + sample{t: minutes(20), f: float64(0)}, + sample{t: minutes(22), f: float64(0)}, + sample{t: minutes(24), f: float64(0)}, + sample{t: minutes(25), f: float64(1)}, + sample{t: minutes(26), f: float64(0)}, + sample{t: minutes(30), f: float64(0)}, + sample{t: minutes(32), f: float64(1)}, // This sample was added after Series() but before Chunk() and its in between the lastmint and maxt so it should be kept + sample{t: minutes(35), f: float64(1)}, }, }, }, @@ -996,7 +996,7 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( // OOO few samples for s1. app = db.Appender(context.Background()) for _, s := range tc.initialSamples { - appendSample(app, s1, s.T(), s.V()) + appendSample(app, s1, s.T(), s.F()) } require.NoError(t, app.Commit()) @@ -1013,7 +1013,7 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( // OOO few samples for s1. 
app = db.Appender(context.Background()) for _, s := range tc.samplesAfterSeriesCall { - appendSample(app, s1, s.T(), s.V()) + appendSample(app, s1, s.T(), s.F()) } require.NoError(t, app.Commit()) @@ -1026,7 +1026,7 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( it := c.Iterator(nil) for it.Next() == chunkenc.ValFloat { ts, v := it.At() - resultSamples = append(resultSamples, sample{t: ts, v: v}) + resultSamples = append(resultSamples, sample{t: ts, f: v}) } require.Equal(t, tc.expChunksSamples[i], resultSamples) } diff --git a/tsdb/ooo_head_test.go b/tsdb/ooo_head_test.go index dcab28b61..691408744 100644 --- a/tsdb/ooo_head_test.go +++ b/tsdb/ooo_head_test.go @@ -52,7 +52,7 @@ func TestOOOInsert(t *testing.T) { chunk := NewOOOChunk() chunk.samples = makeEvenSampleSlice(numPreExisting) newSample := samplify(valOdd(insertPos)) - chunk.Insert(newSample.t, newSample.v) + chunk.Insert(newSample.t, newSample.f) var expSamples []sample // Our expected new samples slice, will be first the original samples. @@ -81,9 +81,9 @@ func TestOOOInsertDuplicate(t *testing.T) { chunk.samples = makeEvenSampleSlice(num) dupSample := chunk.samples[dupPos] - dupSample.v = 0.123 + dupSample.f = 0.123 - ok := chunk.Insert(dupSample.t, dupSample.v) + ok := chunk.Insert(dupSample.t, dupSample.f) expSamples := makeEvenSampleSlice(num) // We expect no change. 
require.False(t, ok) diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index cf9867a4f..fa3dd2418 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -132,7 +132,7 @@ func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkRe chunk := chunkenc.NewXORChunk() app, _ := chunk.Appender() for _, smpl := range chk { - app.Append(smpl.t, smpl.v) + app.Append(smpl.t, smpl.f) } chkReader[chunkRef] = chunk chunkRef++ @@ -479,7 +479,7 @@ func TestBlockQuerier_AgainstHeadWithOpenChunks(t *testing.T) { for _, s := range testData { for _, chk := range s.chunks { for _, sample := range chk { - _, err = app.Append(0, labels.FromMap(s.lset), sample.t, sample.v) + _, err = app.Append(0, labels.FromMap(s.lset), sample.t, sample.f) require.NoError(t, err) } } @@ -882,7 +882,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { if tc.seekSuccess { // After successful seek iterator is ready. Grab the value. t, v := it.At() - r = append(r, sample{t: t, v: v}) + r = append(r, sample{t: t, f: v}) } } expandedResult, err := storage.ExpandSamples(it, newSample) @@ -1054,8 +1054,8 @@ func TestDeletedIterator(t *testing.T) { act := make([]sample, 1000) for i := 0; i < 1000; i++ { act[i].t = int64(i) - act[i].v = rand.Float64() - app.Append(act[i].t, act[i].v) + act[i].f = rand.Float64() + app.Append(act[i].t, act[i].f) } cases := []struct { @@ -1090,7 +1090,7 @@ func TestDeletedIterator(t *testing.T) { ts, v := it.At() require.Equal(t, act[i].t, ts) - require.Equal(t, act[i].v, v) + require.Equal(t, act[i].f, v) } // There has been an extra call to Next(). 
i++ @@ -1114,8 +1114,8 @@ func TestDeletedIterator_WithSeek(t *testing.T) { act := make([]sample, 1000) for i := 0; i < 1000; i++ { act[i].t = int64(i) - act[i].v = float64(i) - app.Append(act[i].t, act[i].v) + act[i].f = float64(i) + app.Append(act[i].t, act[i].f) } cases := []struct { diff --git a/tsdb/tsdbutil/chunks.go b/tsdb/tsdbutil/chunks.go index 4e025c5e6..02a7dd619 100644 --- a/tsdb/tsdbutil/chunks.go +++ b/tsdb/tsdbutil/chunks.go @@ -28,7 +28,7 @@ type Samples interface { type Sample interface { T() int64 - V() float64 // TODO(beorn7): Rename to F(). + F() float64 H() *histogram.Histogram FH() *histogram.FloatHistogram Type() chunkenc.ValueType @@ -69,7 +69,7 @@ func ChunkFromSamplesGeneric(s Samples) chunks.Meta { for i := 0; i < s.Len(); i++ { switch sampleType { case chunkenc.ValFloat: - ca.Append(s.Get(i).T(), s.Get(i).V()) + ca.Append(s.Get(i).T(), s.Get(i).F()) case chunkenc.ValHistogram: ca.AppendHistogram(s.Get(i).T(), s.Get(i).H()) case chunkenc.ValFloatHistogram: @@ -87,7 +87,7 @@ func ChunkFromSamplesGeneric(s Samples) chunks.Meta { type sample struct { t int64 - v float64 + f float64 h *histogram.Histogram fh *histogram.FloatHistogram } @@ -96,8 +96,8 @@ func (s sample) T() int64 { return s.t } -func (s sample) V() float64 { - return s.v +func (s sample) F() float64 { + return s.f } func (s sample) H() *histogram.Histogram { @@ -123,7 +123,7 @@ func (s sample) Type() chunkenc.ValueType { func PopulatedChunk(numSamples int, minTime int64) chunks.Meta { samples := make([]Sample, numSamples) for i := 0; i < numSamples; i++ { - samples[i] = sample{t: minTime + int64(i*1000), v: 1.0} + samples[i] = sample{t: minTime + int64(i*1000), f: 1.0} } return ChunkFromSamples(samples) } @@ -133,7 +133,7 @@ func GenerateSamples(start, numSamples int) []Sample { return generateSamples(start, numSamples, func(i int) Sample { return sample{ t: int64(i), - v: float64(i), + f: float64(i), } }) } diff --git a/util/jsonutil/marshal.go b/util/jsonutil/marshal.go 
index 4400385d0..d715eabe6 100644 --- a/util/jsonutil/marshal.go +++ b/util/jsonutil/marshal.go @@ -45,12 +45,12 @@ func MarshalTimestamp(t int64, stream *jsoniter.Stream) { } // MarshalFloat marshals a float value using the passed jsoniter stream. -func MarshalFloat(v float64, stream *jsoniter.Stream) { +func MarshalFloat(f float64, stream *jsoniter.Stream) { stream.WriteRaw(`"`) // Taken from https://github.com/json-iterator/go/blob/master/stream_float.go#L71 as a workaround // to https://github.com/json-iterator/go/issues/365 (jsoniter, to follow json standard, doesn't allow inf/nan). buf := stream.Buffer() - abs := math.Abs(v) + abs := math.Abs(f) fmt := byte('f') // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. if abs != 0 { @@ -58,7 +58,7 @@ func MarshalFloat(v float64, stream *jsoniter.Stream) { fmt = 'e' } } - buf = strconv.AppendFloat(buf, v, fmt, -1, 64) + buf = strconv.AppendFloat(buf, f, fmt, -1, 64) stream.SetBuffer(buf) stream.WriteRaw(`"`) } diff --git a/web/federate.go b/web/federate.go index 15e31b1a3..b0f4c0610 100644 --- a/web/federate.go +++ b/web/federate.go @@ -132,7 +132,7 @@ Loop: t = sample.T() switch sample.Type() { case chunkenc.ValFloat: - f = sample.V() + f = sample.F() case chunkenc.ValHistogram: fh = sample.H().ToFloat() case chunkenc.ValFloatHistogram: From 551de0346fe157f0910b99a2c99e8285d96d1b1c Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 5 Apr 2023 14:31:05 +0200 Subject: [PATCH 072/632] promql: Do not return nil slices to the pool Signed-off-by: beorn7 --- promql/engine.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 4c6751088..b49be244f 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1874,7 +1874,9 @@ func getFPointSlice(sz int) []FPoint { } func putFPointSlice(p []FPoint) { - fPointPool.Put(p[:0]) + if p != nil { + fPointPool.Put(p[:0]) + } } func getHPointSlice(sz int) []HPoint { @@ -1885,7 
+1887,9 @@ func getHPointSlice(sz int) []HPoint { } func putHPointSlice(p []HPoint) { - hPointPool.Put(p[:0]) + if p != nil { + hPointPool.Put(p[:0]) + } } // matrixSelector evaluates a *parser.MatrixSelector expression. From 717a3f8e2546863013e0b3284ae99fa4917115ef Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 13 Apr 2023 17:42:40 +0200 Subject: [PATCH 073/632] storage: Manually expand `genericAdd` for specific types This commit is doing what I would have expected that Go generics do for me. However, the PromQL benchmarks show a significant runtime and allocation increase with `genericAdd`, so this replaces it with hand-coded non-generic versions of it. Signed-off-by: beorn7 --- storage/buffer.go | 192 ++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 179 insertions(+), 13 deletions(-) diff --git a/storage/buffer.go b/storage/buffer.go index 0daadc044..27ac21661 100644 --- a/storage/buffer.go +++ b/storage/buffer.go @@ -401,17 +401,17 @@ func (r *sampleRing) add(s tsdbutil.Sample) { switch s := s.(type) { case fSample: if len(r.hBuf)+len(r.fhBuf) == 0 { - r.fBuf = genericAdd(s, r.fBuf, r) + r.fBuf = addF(s, r.fBuf, r) return } case hSample: if len(r.fBuf)+len(r.fhBuf) == 0 { - r.hBuf = genericAdd(s, r.hBuf, r) + r.hBuf = addH(s, r.hBuf, r) return } case fhSample: if len(r.fBuf)+len(r.hBuf) == 0 { - r.fhBuf = genericAdd(s, r.fhBuf, r) + r.fhBuf = addFH(s, r.fhBuf, r) return } } @@ -435,7 +435,7 @@ func (r *sampleRing) add(s tsdbutil.Sample) { r.fhBuf = nil } } - r.buf = genericAdd(s, r.buf, r) + r.buf = addSample(s, r.buf, r) } // addF is a version of the add method specialized for fSample. @@ -443,13 +443,13 @@ func (r *sampleRing) addF(s fSample) { switch { case len(r.buf) > 0: // Already have interface samples. Add to the interface buf. - r.buf = genericAdd[tsdbutil.Sample](s, r.buf, r) + r.buf = addSample(s, r.buf, r) case len(r.hBuf)+len(r.fhBuf) > 0: // Already have specialized samples that are not fSamples. 
// Need to call the checked add method for conversion. r.add(s) default: - r.fBuf = genericAdd(s, r.fBuf, r) + r.fBuf = addF(s, r.fBuf, r) } } @@ -458,13 +458,13 @@ func (r *sampleRing) addH(s hSample) { switch { case len(r.buf) > 0: // Already have interface samples. Add to the interface buf. - r.buf = genericAdd[tsdbutil.Sample](s, r.buf, r) + r.buf = addSample(s, r.buf, r) case len(r.fBuf)+len(r.fhBuf) > 0: // Already have samples that are not hSamples. // Need to call the checked add method for conversion. r.add(s) default: - r.hBuf = genericAdd(s, r.hBuf, r) + r.hBuf = addH(s, r.hBuf, r) } } @@ -473,25 +473,191 @@ func (r *sampleRing) addFH(s fhSample) { switch { case len(r.buf) > 0: // Already have interface samples. Add to the interface buf. - r.buf = genericAdd[tsdbutil.Sample](s, r.buf, r) + r.buf = addSample(s, r.buf, r) case len(r.fBuf)+len(r.hBuf) > 0: // Already have samples that are not fhSamples. // Need to call the checked add method for conversion. r.add(s) default: - r.fhBuf = genericAdd(s, r.fhBuf, r) + r.fhBuf = addFH(s, r.fhBuf, r) } } -func genericAdd[T tsdbutil.Sample](s T, buf []T, r *sampleRing) []T { +// genericAdd is a generic implementation of adding a tsdbutil.Sample +// implementation to a buffer of a sample ring. However, the Go compiler +// currently (go1.20) decides to not expand the code during compile time, but +// creates dynamic code to handle the different types. That has a significant +// overhead during runtime, noticeable in PromQL benchmarks. For example, the +// "RangeQuery/expr=rate(a_hundred[1d]),steps=.*" benchmarks show about 7% +// longer runtime, 9% higher allocation size, and 10% more allocations. +// Therefore, genericAdd has been manually implemented for all the types +// (addSample, addF, addH, addFH) below. +// +// func genericAdd[T tsdbutil.Sample](s T, buf []T, r *sampleRing) []T { +// l := len(buf) +// // Grow the ring buffer if it fits no more elements. 
+// if l == 0 { +// buf = make([]T, 16) +// l = 16 +// } +// if l == r.l { +// newBuf := make([]T, 2*l) +// copy(newBuf[l+r.f:], buf[r.f:]) +// copy(newBuf, buf[:r.f]) +// +// buf = newBuf +// r.i = r.f +// r.f += l +// l = 2 * l +// } else { +// r.i++ +// if r.i >= l { +// r.i -= l +// } +// } +// +// buf[r.i] = s +// r.l++ +// +// // Free head of the buffer of samples that just fell out of the range. +// tmin := s.T() - r.delta +// for buf[r.f].T() < tmin { +// r.f++ +// if r.f >= l { +// r.f -= l +// } +// r.l-- +// } +// return buf +// } + +// addSample is a handcoded specialization of genericAdd (see above). +func addSample(s tsdbutil.Sample, buf []tsdbutil.Sample, r *sampleRing) []tsdbutil.Sample { l := len(buf) // Grow the ring buffer if it fits no more elements. if l == 0 { - buf = make([]T, 16) + buf = make([]tsdbutil.Sample, 16) l = 16 } if l == r.l { - newBuf := make([]T, 2*l) + newBuf := make([]tsdbutil.Sample, 2*l) + copy(newBuf[l+r.f:], buf[r.f:]) + copy(newBuf, buf[:r.f]) + + buf = newBuf + r.i = r.f + r.f += l + l = 2 * l + } else { + r.i++ + if r.i >= l { + r.i -= l + } + } + + buf[r.i] = s + r.l++ + + // Free head of the buffer of samples that just fell out of the range. + tmin := s.T() - r.delta + for buf[r.f].T() < tmin { + r.f++ + if r.f >= l { + r.f -= l + } + r.l-- + } + return buf +} + +// addF is a handcoded specialization of genericAdd (see above). +func addF(s fSample, buf []fSample, r *sampleRing) []fSample { + l := len(buf) + // Grow the ring buffer if it fits no more elements. + if l == 0 { + buf = make([]fSample, 16) + l = 16 + } + if l == r.l { + newBuf := make([]fSample, 2*l) + copy(newBuf[l+r.f:], buf[r.f:]) + copy(newBuf, buf[:r.f]) + + buf = newBuf + r.i = r.f + r.f += l + l = 2 * l + } else { + r.i++ + if r.i >= l { + r.i -= l + } + } + + buf[r.i] = s + r.l++ + + // Free head of the buffer of samples that just fell out of the range. 
+ tmin := s.T() - r.delta + for buf[r.f].T() < tmin { + r.f++ + if r.f >= l { + r.f -= l + } + r.l-- + } + return buf +} + +// addH is a handcoded specialization of genericAdd (see above). +func addH(s hSample, buf []hSample, r *sampleRing) []hSample { + l := len(buf) + // Grow the ring buffer if it fits no more elements. + if l == 0 { + buf = make([]hSample, 16) + l = 16 + } + if l == r.l { + newBuf := make([]hSample, 2*l) + copy(newBuf[l+r.f:], buf[r.f:]) + copy(newBuf, buf[:r.f]) + + buf = newBuf + r.i = r.f + r.f += l + l = 2 * l + } else { + r.i++ + if r.i >= l { + r.i -= l + } + } + + buf[r.i] = s + r.l++ + + // Free head of the buffer of samples that just fell out of the range. + tmin := s.T() - r.delta + for buf[r.f].T() < tmin { + r.f++ + if r.f >= l { + r.f -= l + } + r.l-- + } + return buf +} + +// addFH is a handcoded specialization of genericAdd (see above). +func addFH(s fhSample, buf []fhSample, r *sampleRing) []fhSample { + l := len(buf) + // Grow the ring buffer if it fits no more elements. 
+ if l == 0 { + buf = make([]fhSample, 16) + l = 16 + } + if l == r.l { + newBuf := make([]fhSample, 2*l) copy(newBuf[l+r.f:], buf[r.f:]) copy(newBuf, buf[:r.f]) From fb3eb212306cad9c9552ccef60318d5c3c8fff19 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Sun, 9 Apr 2023 09:08:40 +0200 Subject: [PATCH 074/632] enable gocritic, unconvert and unused linters Signed-off-by: Matthieu MOREL --- .github/workflows/ci.yml | 2 ++ .golangci.yml | 11 ++++++- cmd/prometheus/main.go | 2 +- cmd/prometheus/query_log_test.go | 2 +- cmd/promtool/backfill.go | 2 +- cmd/promtool/rules.go | 2 +- cmd/promtool/unittest.go | 2 +- discovery/kubernetes/endpointslice.go | 2 +- discovery/kubernetes/kubernetes.go | 7 +++-- discovery/legacymanager/registry.go | 2 +- discovery/linode/linode.go | 8 ++--- discovery/marathon/marathon.go | 7 +++-- discovery/ovhcloud/dedicated_server_test.go | 6 ++-- discovery/ovhcloud/vps_test.go | 6 ++-- discovery/registry.go | 2 +- discovery/vultr/vultr.go | 4 +-- .../remote_storage_adapter/influxdb/client.go | 11 ++++--- model/labels/labels.go | 14 +++++---- model/textparse/promparse_test.go | 2 +- promql/bench_test.go | 6 ++-- promql/engine.go | 24 +++++++------- promql/engine_test.go | 2 +- promql/functions.go | 8 ++--- promql/functions_test.go | 2 +- promql/parser/ast.go | 11 ++++--- promql/parser/parse.go | 14 ++++----- promql/parser/parse_test.go | 2 +- promql/parser/printer.go | 21 +++++++------ rules/manager.go | 7 +++-- rules/manager_test.go | 7 +++-- scrape/scrape.go | 6 ++-- scrape/scrape_test.go | 27 ++++++++-------- scrape/target.go | 4 +-- storage/merge.go | 7 +++-- storage/remote/codec.go | 27 ++++++++-------- storage/remote/queue_manager.go | 8 ++--- template/template.go | 2 +- tsdb/agent/db_test.go | 3 +- tsdb/block_test.go | 4 +-- tsdb/chunkenc/bstream.go | 6 ++-- tsdb/chunkenc/float_histogram.go | 2 +- tsdb/chunkenc/histogram.go | 6 ++-- tsdb/chunkenc/varbit.go | 2 +- tsdb/chunkenc/xor.go | 12 +++---- tsdb/chunks/head_chunks_test.go | 4 +-- 
tsdb/compact.go | 2 +- tsdb/compact_test.go | 6 ---- tsdb/db.go | 6 ++-- tsdb/db_test.go | 4 +-- tsdb/exemplar.go | 18 +++++------ tsdb/head.go | 2 +- tsdb/head_test.go | 31 ++++++++++--------- tsdb/head_wal.go | 4 +-- tsdb/index/index.go | 2 +- tsdb/index/postings.go | 13 ++++---- tsdb/index/postingsstats.go | 6 ++-- tsdb/isolation.go | 2 +- tsdb/querier.go | 26 +++++++++------- tsdb/querier_test.go | 4 +-- tsdb/wal.go | 4 +-- tsdb/wlog/wlog_test.go | 4 +-- util/runtime/limits_default.go | 2 +- util/runtime/statfs_default.go | 8 +++-- web/api/v1/api.go | 4 +-- web/api/v1/api_test.go | 4 +-- web/web.go | 4 +-- 66 files changed, 245 insertions(+), 229 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 17185c5bc..07b1242c2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -149,6 +149,8 @@ jobs: - name: Lint uses: golangci/golangci-lint-action@v3.4.0 with: + args: --verbose + skip-cache: true version: v1.51.2 fuzzing: uses: ./.github/workflows/fuzzing.yml diff --git a/.golangci.yml b/.golangci.yml index efa6b2044..c0c20d425 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,5 +1,5 @@ run: - deadline: 5m + timeout: 15m skip-files: # Skip autogenerated files. 
- ^.*\.(pb|y)\.go$ @@ -10,14 +10,23 @@ output: linters: enable: - depguard + - gocritic - gofumpt - goimports - revive - misspell + - unconvert + - unused issues: max-same-issues: 0 exclude-rules: + - linters: + - gocritic + text: "appendAssign" + - linters: + - gocritic + text: "singleCaseSwitch" - path: _test.go linters: - errcheck diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index f4f6af20d..cbe8f503d 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -490,7 +490,7 @@ func main() { if cfgFile.StorageConfig.ExemplarsConfig == nil { cfgFile.StorageConfig.ExemplarsConfig = &config.DefaultExemplarsConfig } - cfg.tsdb.MaxExemplars = int64(cfgFile.StorageConfig.ExemplarsConfig.MaxExemplars) + cfg.tsdb.MaxExemplars = cfgFile.StorageConfig.ExemplarsConfig.MaxExemplars } if cfgFile.StorageConfig.TSDBConfig != nil { cfg.tsdb.OutOfOrderTimeWindow = cfgFile.StorageConfig.TSDBConfig.OutOfOrderTimeWindow diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go index d5dfbea50..f20f2a22c 100644 --- a/cmd/prometheus/query_log_test.go +++ b/cmd/prometheus/query_log_test.go @@ -193,7 +193,7 @@ func (p *queryLogTest) String() string { } name = name + ", " + p.host + ":" + strconv.Itoa(p.port) if p.enabledAtStart { - name = name + ", enabled at start" + name += ", enabled at start" } if p.prefix != "" { name = name + ", with prefix " + p.prefix diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go index 3c23d2c03..39410881b 100644 --- a/cmd/promtool/backfill.go +++ b/cmd/promtool/backfill.go @@ -101,7 +101,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn nextSampleTs int64 = math.MaxInt64 ) - for t := mint; t <= maxt; t = t + blockDuration { + for t := mint; t <= maxt; t += blockDuration { tsUpper := t + blockDuration if nextSampleTs != math.MaxInt64 && nextSampleTs >= tsUpper { // The next sample is not in this timerange, we can avoid parsing diff --git a/cmd/promtool/rules.go 
b/cmd/promtool/rules.go index aedc7bcb9..e430fe189 100644 --- a/cmd/promtool/rules.go +++ b/cmd/promtool/rules.go @@ -100,7 +100,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName startInMs := start.Unix() * int64(time.Second/time.Millisecond) endInMs := end.Unix() * int64(time.Second/time.Millisecond) - for startOfBlock := blockDuration * (startInMs / blockDuration); startOfBlock <= endInMs; startOfBlock = startOfBlock + blockDuration { + for startOfBlock := blockDuration * (startInMs / blockDuration); startOfBlock <= endInMs; startOfBlock += blockDuration { endOfBlock := startOfBlock + blockDuration - 1 currStart := max(startOfBlock/int64(time.Second/time.Millisecond), start.Unix()) diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index b3e6f67f9..84dfd9ec7 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -130,7 +130,7 @@ func resolveAndGlobFilepaths(baseDir string, utf *unitTestFile) error { if err != nil { return err } - if len(m) <= 0 { + if len(m) == 0 { fmt.Fprintln(os.Stderr, " WARNING: no file match pattern", rf) } globbedFiles = append(globbedFiles, m...) 
diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index 135735154..d5bff8a5f 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -300,7 +300,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou } if port.protocol() != nil { - target[endpointSlicePortProtocolLabel] = lv(string(*port.protocol())) + target[endpointSlicePortProtocolLabel] = lv(*port.protocol()) } if port.port() != nil { diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go index 0f03e2cdb..a44bd513c 100644 --- a/discovery/kubernetes/kubernetes.go +++ b/discovery/kubernetes/kubernetes.go @@ -299,12 +299,13 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) { err error ownNamespace string ) - if conf.KubeConfig != "" { + switch { + case conf.KubeConfig != "": kcfg, err = clientcmd.BuildConfigFromFlags("", conf.KubeConfig) if err != nil { return nil, err } - } else if conf.APIServer.URL == nil { + case conf.APIServer.URL == nil: // Use the Kubernetes provided pod service account // as described in https://kubernetes.io/docs/admin/service-accounts-admin/ kcfg, err = rest.InClusterConfig() @@ -324,7 +325,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) { } level.Info(l).Log("msg", "Using pod service account via in-cluster config") - } else { + default: rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd") if err != nil { return nil, err diff --git a/discovery/legacymanager/registry.go b/discovery/legacymanager/registry.go index 687f09382..955705394 100644 --- a/discovery/legacymanager/registry.go +++ b/discovery/legacymanager/registry.go @@ -254,7 +254,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error { oldStr := oldTyp.String() newStr := newTyp.String() for i, s := range e.Errors { - e.Errors[i] = strings.Replace(s, oldStr, newStr, -1) + e.Errors[i] = strings.ReplaceAll(s, 
oldStr, newStr) } } return err diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go index 0fd0a2c37..12b957514 100644 --- a/discovery/linode/linode.go +++ b/discovery/linode/linode.go @@ -249,20 +249,20 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro if detailedIP.Address != ip.String() { continue } - - if detailedIP.Public && publicIPv4 == "" { + switch { + case detailedIP.Public && publicIPv4 == "": publicIPv4 = detailedIP.Address if detailedIP.RDNS != "" && detailedIP.RDNS != "null" { publicIPv4RDNS = detailedIP.RDNS } - } else if !detailedIP.Public && privateIPv4 == "" { + case !detailedIP.Public && privateIPv4 == "": privateIPv4 = detailedIP.Address if detailedIP.RDNS != "" && detailedIP.RDNS != "null" { privateIPv4RDNS = detailedIP.RDNS } - } else { + default: extraIPs = append(extraIPs, detailedIP.Address) } } diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go index 079f93ad0..c31daee1f 100644 --- a/discovery/marathon/marathon.go +++ b/discovery/marathon/marathon.go @@ -400,19 +400,20 @@ func targetsForApp(app *app) []model.LabelSet { var labels []map[string]string var prefix string - if len(app.Container.PortMappings) != 0 { + switch { + case len(app.Container.PortMappings) != 0: // In Marathon 1.5.x the "container.docker.portMappings" object was moved // to "container.portMappings". ports, labels = extractPortMapping(app.Container.PortMappings, app.isContainerNet()) prefix = portMappingLabelPrefix - } else if len(app.Container.Docker.PortMappings) != 0 { + case len(app.Container.Docker.PortMappings) != 0: // Prior to Marathon 1.5 the port mappings could be found at the path // "container.docker.portMappings". 
ports, labels = extractPortMapping(app.Container.Docker.PortMappings, app.isContainerNet()) prefix = portMappingLabelPrefix - } else if len(app.PortDefinitions) != 0 { + case len(app.PortDefinitions) != 0: // PortDefinitions deprecates the "ports" array and can be used to specify // a list of ports with metadata in case a mapping is not required. ports = make([]uint32, len(app.PortDefinitions)) diff --git a/discovery/ovhcloud/dedicated_server_test.go b/discovery/ovhcloud/dedicated_server_test.go index 03a01005a..e8ffa4a28 100644 --- a/discovery/ovhcloud/dedicated_server_test.go +++ b/discovery/ovhcloud/dedicated_server_test.go @@ -84,7 +84,7 @@ func MockDedicatedAPI(w http.ResponseWriter, r *http.Request) { return } w.Header().Set("Content-Type", "application/json") - if string(r.URL.Path) == "/dedicated/server" { + if r.URL.Path == "/dedicated/server" { dedicatedServersList, err := os.ReadFile("testdata/dedicated_server/dedicated_servers.json") if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) @@ -96,7 +96,7 @@ func MockDedicatedAPI(w http.ResponseWriter, r *http.Request) { return } } - if string(r.URL.Path) == "/dedicated/server/abcde" { + if r.URL.Path == "/dedicated/server/abcde" { dedicatedServer, err := os.ReadFile("testdata/dedicated_server/dedicated_servers_details.json") if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) @@ -108,7 +108,7 @@ func MockDedicatedAPI(w http.ResponseWriter, r *http.Request) { return } } - if string(r.URL.Path) == "/dedicated/server/abcde/ips" { + if r.URL.Path == "/dedicated/server/abcde/ips" { dedicatedServerIPs, err := os.ReadFile("testdata/dedicated_server/dedicated_servers_abcde_ips.json") if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) diff --git a/discovery/ovhcloud/vps_test.go b/discovery/ovhcloud/vps_test.go index 31b30fdfc..b1177f215 100644 --- a/discovery/ovhcloud/vps_test.go +++ b/discovery/ovhcloud/vps_test.go @@ -91,7 +91,7 @@ func 
MockVpsAPI(w http.ResponseWriter, r *http.Request) { return } w.Header().Set("Content-Type", "application/json") - if string(r.URL.Path) == "/vps" { + if r.URL.Path == "/vps" { dedicatedServersList, err := os.ReadFile("testdata/vps/vps.json") if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) @@ -103,7 +103,7 @@ func MockVpsAPI(w http.ResponseWriter, r *http.Request) { return } } - if string(r.URL.Path) == "/vps/abc" { + if r.URL.Path == "/vps/abc" { dedicatedServer, err := os.ReadFile("testdata/vps/vps_details.json") if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) @@ -115,7 +115,7 @@ func MockVpsAPI(w http.ResponseWriter, r *http.Request) { return } } - if string(r.URL.Path) == "/vps/abc/ips" { + if r.URL.Path == "/vps/abc/ips" { dedicatedServerIPs, err := os.ReadFile("testdata/vps/vps_abc_ips.json") if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) diff --git a/discovery/registry.go b/discovery/registry.go index 8274628c2..13168a07a 100644 --- a/discovery/registry.go +++ b/discovery/registry.go @@ -253,7 +253,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error { oldStr := oldTyp.String() newStr := newTyp.String() for i, s := range e.Errors { - e.Errors[i] = strings.Replace(s, oldStr, newStr, -1) + e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr) } } return err diff --git a/discovery/vultr/vultr.go b/discovery/vultr/vultr.go index 2f489e7d4..42881d3c1 100644 --- a/discovery/vultr/vultr.go +++ b/discovery/vultr/vultr.go @@ -202,10 +202,8 @@ func (d *Discovery) listInstances(ctx context.Context) ([]govultr.Instance, erro if meta.Links.Next == "" { break - } else { - listOptions.Cursor = meta.Links.Next - continue } + listOptions.Cursor = meta.Links.Next } return instances, nil diff --git a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go index 
fffbc9c2a..e84ed9e12 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go @@ -184,11 +184,11 @@ func (c *Client) buildCommand(q *prompb.Query) (string, error) { } func escapeSingleQuotes(str string) string { - return strings.Replace(str, `'`, `\'`, -1) + return strings.ReplaceAll(str, `'`, `\'`) } func escapeSlashes(str string) string { - return strings.Replace(str, `/`, `\/`, -1) + return strings.ReplaceAll(str, `/`, `\/`) } func mergeResult(labelsToSeries map[string]*prompb.TimeSeries, results []influx.Result) error { @@ -290,13 +290,14 @@ func mergeSamples(a, b []prompb.Sample) []prompb.Sample { result := make([]prompb.Sample, 0, len(a)+len(b)) i, j := 0, 0 for i < len(a) && j < len(b) { - if a[i].Timestamp < b[j].Timestamp { + switch { + case a[i].Timestamp < b[j].Timestamp: result = append(result, a[i]) i++ - } else if a[i].Timestamp > b[j].Timestamp { + case a[i].Timestamp > b[j].Timestamp: result = append(result, b[j]) j++ - } else { + default: result = append(result, a[i]) i++ j++ diff --git a/model/labels/labels.go b/model/labels/labels.go index b7398d17f..93524ddcf 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -169,11 +169,12 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { b = b[:0] i, j := 0, 0 for i < len(ls) && j < len(names) { - if names[j] < ls[i].Name { + switch { + case names[j] < ls[i].Name: j++ - } else if ls[i].Name < names[j] { + case ls[i].Name < names[j]: i++ - } else { + default: b = append(b, ls[i].Name...) b = append(b, seps[0]) b = append(b, ls[i].Value...) 
@@ -213,11 +214,12 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { b.WriteByte(labelSep) i, j := 0, 0 for i < len(ls) && j < len(names) { - if names[j] < ls[i].Name { + switch { + case names[j] < ls[i].Name: j++ - } else if ls[i].Name < names[j] { + case ls[i].Name < names[j]: i++ - } else { + default: if b.Len() > 1 { b.WriteByte(seps[0]) } diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go index e0ecf62f5..280f39b4f 100644 --- a/model/textparse/promparse_test.go +++ b/model/textparse/promparse_test.go @@ -512,7 +512,7 @@ func BenchmarkGzip(b *testing.B) { k := b.N / promtestdataSampleCount b.ReportAllocs() - b.SetBytes(int64(n) / promtestdataSampleCount) + b.SetBytes(n / promtestdataSampleCount) b.ResetTimer() total := 0 diff --git a/promql/bench_test.go b/promql/bench_test.go index 88025d932..d197da888 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -194,9 +194,9 @@ func rangeQueryCases() []benchCase { if !strings.Contains(c.expr, "X") { tmp = append(tmp, c) } else { - tmp = append(tmp, benchCase{expr: strings.Replace(c.expr, "X", "one", -1), steps: c.steps}) - tmp = append(tmp, benchCase{expr: strings.Replace(c.expr, "X", "ten", -1), steps: c.steps}) - tmp = append(tmp, benchCase{expr: strings.Replace(c.expr, "X", "hundred", -1), steps: c.steps}) + tmp = append(tmp, benchCase{expr: strings.ReplaceAll(c.expr, "X", "one"), steps: c.steps}) + tmp = append(tmp, benchCase{expr: strings.ReplaceAll(c.expr, "X", "ten"), steps: c.steps}) + tmp = append(tmp, benchCase{expr: strings.ReplaceAll(c.expr, "X", "hundred"), steps: c.steps}) } } cases = tmp diff --git a/promql/engine.go b/promql/engine.go index b49be244f..4dfa6b119 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -783,7 +783,6 @@ func (ng *Engine) findMinMaxTime(s *parser.EvalStmt) (int64, int64) { maxTimestamp = end } evalRange = 0 - case *parser.MatrixSelector: evalRange = n.Range } @@ -816,20 +815,20 @@ func (ng *Engine) 
getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorS } else { offsetMilliseconds := durationMilliseconds(subqOffset) start = start - offsetMilliseconds - durationMilliseconds(subqRange) - end = end - offsetMilliseconds + end -= offsetMilliseconds } if evalRange == 0 { - start = start - durationMilliseconds(s.LookbackDelta) + start -= durationMilliseconds(s.LookbackDelta) } else { // For all matrix queries we want to ensure that we have (end-start) + range selected // this way we have `range` data before the start time - start = start - durationMilliseconds(evalRange) + start -= durationMilliseconds(evalRange) } offsetMilliseconds := durationMilliseconds(n.OriginalOffset) - start = start - offsetMilliseconds - end = end - offsetMilliseconds + start -= offsetMilliseconds + end -= offsetMilliseconds return start, end } @@ -1745,7 +1744,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { res, ws := newEv.eval(e.Expr) ev.currentSamples = newEv.currentSamples ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats) - for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts = ts + ev.interval { + for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { step++ ev.samplesStats.IncrementSamplesAtStep(step, newEv.samplesStats.TotalSamples) } @@ -1767,7 +1766,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { if len(mat[i].Floats)+len(mat[i].Histograms) != 1 { panic(fmt.Errorf("unexpected number of samples")) } - for ts := ev.startTimestamp + ev.interval; ts <= ev.endTimestamp; ts = ts + ev.interval { + for ts := ev.startTimestamp + ev.interval; ts <= ev.endTimestamp; ts += ev.interval { if len(mat[i].Floats) > 0 { mat[i].Floats = append(mat[i].Floats, FPoint{ T: ts, @@ -2514,14 +2513,15 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without if !ok { var m labels.Labels enh.resetBuilder(metric) - if without { + switch { + case without: 
enh.lb.Del(grouping...) enh.lb.Del(labels.MetricName) m = enh.lb.Labels() - } else if len(grouping) > 0 { + case len(grouping) > 0: enh.lb.Keep(grouping...) m = enh.lb.Labels() - } else { + default: m = labels.EmptyLabels() } newAgg := &groupedAggregation{ @@ -2689,7 +2689,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without aggr.floatValue = float64(aggr.groupCount) case parser.STDVAR: - aggr.floatValue = aggr.floatValue / float64(aggr.groupCount) + aggr.floatValue /= float64(aggr.groupCount) case parser.STDDEV: aggr.floatValue = math.Sqrt(aggr.floatValue / float64(aggr.groupCount)) diff --git a/promql/engine_test.go b/promql/engine_test.go index b64e32ba4..056fd2355 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -3267,7 +3267,7 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { require.Len(t, vector, 1) require.Nil(t, vector[0].H) if floatHisto { - require.Equal(t, float64(h.ToFloat().Count), vector[0].F) + require.Equal(t, h.ToFloat().Count, vector[0].F) } else { require.Equal(t, float64(h.Count), vector[0].F) } diff --git a/promql/functions.go b/promql/functions.go index fd99703df..0e7a601e3 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -877,10 +877,10 @@ func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept f } return 0, initY } - sumX = sumX + cX - sumY = sumY + cY - sumXY = sumXY + cXY - sumX2 = sumX2 + cX2 + sumX += cX + sumY += cY + sumXY += cXY + sumX2 += cX2 covXY := sumXY - sumX*sumY/n varX := sumX2 - sumX*sumX/n diff --git a/promql/functions_test.go b/promql/functions_test.go index e552424b3..8181481c0 100644 --- a/promql/functions_test.go +++ b/promql/functions_test.go @@ -51,7 +51,7 @@ func TestDeriv(t *testing.T) { // https://github.com/prometheus/prometheus/issues/7180 for i = 0; i < 15; i++ { jitter := 12 * i % 2 - a.Append(0, metric, int64(start+interval*i+jitter), 1) + a.Append(0, metric, start+interval*i+jitter, 1) } require.NoError(t, 
a.Commit()) diff --git a/promql/parser/ast.go b/promql/parser/ast.go index 190af2d59..86f139499 100644 --- a/promql/parser/ast.go +++ b/promql/parser/ast.go @@ -349,7 +349,7 @@ func (f inspector) Visit(node Node, path []Node) (Visitor, error) { // for all the non-nil children of node, recursively. func Inspect(node Node, f inspector) { //nolint: errcheck - Walk(inspector(f), node, nil) + Walk(f, node, nil) } // Children returns a list of all child nodes of a syntax tree node. @@ -368,13 +368,14 @@ func Children(node Node) []Node { case *AggregateExpr: // While this does not look nice, it should avoid unnecessary allocations // caused by slice resizing - if n.Expr == nil && n.Param == nil { + switch { + case n.Expr == nil && n.Param == nil: return nil - } else if n.Expr == nil { + case n.Expr == nil: return []Node{n.Param} - } else if n.Param == nil { + case n.Param == nil: return []Node{n.Expr} - } else { + default: return []Node{n.Expr, n.Param} } case *BinaryExpr: diff --git a/promql/parser/parse.go b/promql/parser/parse.go index 6c37ce6fc..fa28097b2 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -523,15 +523,13 @@ func (p *parser) checkAST(node Node) (typ ValueType) { p.addParseErrf(n.PositionRange(), "vector matching only allowed between instant vectors") } n.VectorMatching = nil - } else { // Both operands are Vectors. 
- if n.Op.IsSetOperator() { - if n.VectorMatching.Card == CardOneToMany || n.VectorMatching.Card == CardManyToOne { - p.addParseErrf(n.PositionRange(), "no grouping allowed for %q operation", n.Op) - } - if n.VectorMatching.Card != CardManyToMany { - p.addParseErrf(n.PositionRange(), "set operations must always be many-to-many") - } + } else if n.Op.IsSetOperator() { + if n.VectorMatching.Card == CardOneToMany || n.VectorMatching.Card == CardManyToOne { + p.addParseErrf(n.PositionRange(), "no grouping allowed for %q operation", n.Op) + } + if n.VectorMatching.Card != CardManyToMany { + p.addParseErrf(n.PositionRange(), "set operations must always be many-to-many") } } diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index df66d9381..7e6870ddb 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -3592,7 +3592,7 @@ func TestNaNExpression(t *testing.T) { nl, ok := expr.(*NumberLiteral) require.True(t, ok, "expected number literal but got %T", expr) - require.True(t, math.IsNaN(float64(nl.Val)), "expected 'NaN' in number literal but got %v", nl.Val) + require.True(t, math.IsNaN(nl.Val), "expected 'NaN' in number literal but got %v", nl.Val) } var testSeries = []struct { diff --git a/promql/parser/printer.go b/promql/parser/printer.go index 1f15eeef3..4fff193e1 100644 --- a/promql/parser/printer.go +++ b/promql/parser/printer.go @@ -130,11 +130,12 @@ func (node *MatrixSelector) String() string { offset = fmt.Sprintf(" offset -%s", model.Duration(-vecSelector.OriginalOffset)) } at := "" - if vecSelector.Timestamp != nil { + switch { + case vecSelector.Timestamp != nil: at = fmt.Sprintf(" @ %.3f", float64(*vecSelector.Timestamp)/1000.0) - } else if vecSelector.StartOrEnd == START { + case vecSelector.StartOrEnd == START: at = " @ start()" - } else if vecSelector.StartOrEnd == END { + case vecSelector.StartOrEnd == END: at = " @ end()" } @@ -168,11 +169,12 @@ func (node *SubqueryExpr) getSubqueryTimeSuffix() string { offset = 
fmt.Sprintf(" offset -%s", model.Duration(-node.OriginalOffset)) } at := "" - if node.Timestamp != nil { + switch { + case node.Timestamp != nil: at = fmt.Sprintf(" @ %.3f", float64(*node.Timestamp)/1000.0) - } else if node.StartOrEnd == START { + case node.StartOrEnd == START: at = " @ start()" - } else if node.StartOrEnd == END { + case node.StartOrEnd == END: at = " @ end()" } return fmt.Sprintf("[%s:%s]%s%s", model.Duration(node.Range), step, at, offset) @@ -213,11 +215,12 @@ func (node *VectorSelector) String() string { offset = fmt.Sprintf(" offset -%s", model.Duration(-node.OriginalOffset)) } at := "" - if node.Timestamp != nil { + switch { + case node.Timestamp != nil: at = fmt.Sprintf(" @ %.3f", float64(*node.Timestamp)/1000.0) - } else if node.StartOrEnd == START { + case node.StartOrEnd == START: at = " @ start()" - } else if node.StartOrEnd == END { + case node.StartOrEnd == END: at = " @ end()" } diff --git a/rules/manager.go b/rules/manager.go index 07d50be1b..82bbfd394 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -866,12 +866,13 @@ func (g *Group) RestoreForState(ts time.Time) { timeSpentPending := downAt.Sub(restoredActiveAt) timeRemainingPending := alertHoldDuration - timeSpentPending - if timeRemainingPending <= 0 { + switch { + case timeRemainingPending <= 0: // It means that alert was firing when prometheus went down. // In the next Eval, the state of this alert will be set back to // firing again if it's still firing in that Eval. // Nothing to be done in this case. 
- } else if timeRemainingPending < g.opts.ForGracePeriod { + case timeRemainingPending < g.opts.ForGracePeriod: // (new) restoredActiveAt = (ts + m.opts.ForGracePeriod) - alertHoldDuration // /* new firing time */ /* moving back by hold duration */ // @@ -884,7 +885,7 @@ func (g *Group) RestoreForState(ts time.Time) { // = (ts + m.opts.ForGracePeriod) - ts // = m.opts.ForGracePeriod restoredActiveAt = ts.Add(g.opts.ForGracePeriod).Add(-alertHoldDuration) - } else { + default: // By shifting ActiveAt to the future (ActiveAt + some_duration), // the total pending time from the original ActiveAt // would be `alertHoldDuration + some_duration`. diff --git a/rules/manager_test.go b/rules/manager_test.go index 440e06c9a..85a74ac52 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -481,17 +481,18 @@ func TestForStateRestore(t *testing.T) { }) // Checking if we have restored it correctly. - if tst.noRestore { + switch { + case tst.noRestore: require.Equal(t, tst.num, len(got)) for _, e := range got { require.Equal(t, e.ActiveAt, restoreTime) } - } else if tst.gracePeriod { + case tst.gracePeriod: require.Equal(t, tst.num, len(got)) for _, e := range got { require.Equal(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime)) } - } else { + default: exp := tst.alerts require.Equal(t, len(exp), len(got)) sortAlerts(exp) diff --git a/scrape/scrape.go b/scrape/scrape.go index f38527ff3..15c886793 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -640,7 +640,7 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error { met := lset.Get(labels.MetricName) if limits.labelLimit > 0 { nbLabels := lset.Len() - if nbLabels > int(limits.labelLimit) { + if nbLabels > limits.labelLimit { return fmt.Errorf("label_limit exceeded (metric: %.50s, number of labels: %d, limit: %d)", met, nbLabels, limits.labelLimit) } } @@ -652,14 +652,14 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error { return lset.Validate(func(l 
labels.Label) error { if limits.labelNameLengthLimit > 0 { nameLength := len(l.Name) - if nameLength > int(limits.labelNameLengthLimit) { + if nameLength > limits.labelNameLengthLimit { return fmt.Errorf("label_name_length_limit exceeded (metric: %.50s, label name: %.50s, length: %d, limit: %d)", met, l.Name, nameLength, limits.labelNameLengthLimit) } } if limits.labelValueLengthLimit > 0 { valueLength := len(l.Value) - if valueLength > int(limits.labelValueLengthLimit) { + if valueLength > limits.labelValueLengthLimit { return fmt.Errorf("label_value_length_limit exceeded (metric: %.50s, label name: %.50s, value: %.50q, length: %d, limit: %d)", met, l.Name, l.Value, valueLength, limits.labelValueLengthLimit) } } diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index dcb3b48c1..07b9c3c87 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -322,7 +322,7 @@ func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) { ScrapeTimeout: model.Duration(2 * time.Second), } newLoop := func(opts scrapeLoopOptions) loop { - l := &testLoop{interval: time.Duration(opts.interval), timeout: time.Duration(opts.timeout)} + l := &testLoop{interval: opts.interval, timeout: opts.timeout} l.startFunc = func(interval, timeout time.Duration, errc chan<- error) { require.Equal(t, 5*time.Second, interval, "Unexpected scrape interval") require.Equal(t, 3*time.Second, timeout, "Unexpected scrape timeout") @@ -546,7 +546,7 @@ func TestScrapePoolRaces(t *testing.T) { require.Equal(t, expectedDropped, len(dropped), "Invalid number of dropped targets") for i := 0; i < 20; i++ { - time.Sleep(time.Duration(10 * time.Millisecond)) + time.Sleep(10 * time.Millisecond) sp.reload(newConfig()) } sp.stop() @@ -1199,14 +1199,14 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) { // Succeed once, several failures, then stop. 
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { numScrapes++ - - if numScrapes == 1 { + switch { + case numScrapes == 1: w.Write([]byte("metric_a 42\n")) return nil - } else if numScrapes == 2 { + case numScrapes == 2: w.Write([]byte("7&-\n")) return nil - } else if numScrapes == 3 { + case numScrapes == 3: cancel() } return errors.New("scrape failed") @@ -1282,14 +1282,14 @@ func TestScrapeLoopCache(t *testing.T) { } numScrapes++ - - if numScrapes == 1 { + switch { + case numScrapes == 1: w.Write([]byte("metric_a 42\nmetric_b 43\n")) return nil - } else if numScrapes == 3 { + case numScrapes == 3: w.Write([]byte("metric_a 44\n")) return nil - } else if numScrapes == 4 { + case numScrapes == 4: cancel() } return fmt.Errorf("scrape failed") @@ -2280,11 +2280,12 @@ func TestTargetScrapeScrapeCancel(t *testing.T) { go func() { _, err := ts.scrape(ctx, io.Discard) - if err == nil { + switch { + case err == nil: errc <- errors.New("Expected error but got nil") - } else if ctx.Err() != context.Canceled { + case ctx.Err() != context.Canceled: errc <- errors.Errorf("Expected context cancellation error but got: %s", ctx.Err()) - } else { + default: close(errc) } }() diff --git a/scrape/target.go b/scrape/target.go index f250910c1..6c4703118 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -413,9 +413,9 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort // Addresses reaching this point are already wrapped in [] if necessary. 
switch scheme { case "http", "": - addr = addr + ":80" + addr += ":80" case "https": - addr = addr + ":443" + addr += ":443" default: return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("invalid scheme: %q", cfg.Scheme) } diff --git a/storage/merge.go b/storage/merge.go index 8db1f7ae8..23a92df1e 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -197,13 +197,14 @@ func mergeStrings(a, b []string) []string { res := make([]string, 0, maxl*10/9) for len(a) > 0 && len(b) > 0 { - if a[0] == b[0] { + switch { + case a[0] == b[0]: res = append(res, a[0]) a, b = a[1:], b[1:] - } else if a[0] < b[0] { + case a[0] < b[0]: res = append(res, a[0]) a = a[1:] - } else { + default: res = append(res, b[0]) b = b[1:] } diff --git a/storage/remote/codec.go b/storage/remote/codec.go index bfbd08d24..02c84a3e6 100644 --- a/storage/remote/codec.go +++ b/storage/remote/codec.go @@ -291,13 +291,14 @@ func MergeLabels(primary, secondary []prompb.Label) []prompb.Label { result := make([]prompb.Label, 0, len(primary)+len(secondary)) i, j := 0, 0 for i < len(primary) && j < len(secondary) { - if primary[i].Name < secondary[j].Name { + switch { + case primary[i].Name < secondary[j].Name: result = append(result, primary[i]) i++ - } else if primary[i].Name > secondary[j].Name { + case primary[i].Name > secondary[j].Name: result = append(result, secondary[j]) j++ - } else { + default: result = append(result, primary[i]) i++ j++ @@ -428,8 +429,8 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType { c.histogramsCur += sort.Search(len(c.series.histograms)-c.histogramsCur, func(n int) bool { return c.series.histograms[n+c.histogramsCur].Timestamp >= t }) - - if c.floatsCur < len(c.series.floats) && c.histogramsCur < len(c.series.histograms) { + switch { + case c.floatsCur < len(c.series.floats) && c.histogramsCur < len(c.series.histograms): // If float samples and histogram samples have overlapping timestamps prefer the float samples. 
if c.series.floats[c.floatsCur].Timestamp <= c.series.histograms[c.histogramsCur].Timestamp { c.curValType = chunkenc.ValFloat @@ -445,12 +446,11 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType { c.floatsCur-- } } - } else if c.floatsCur < len(c.series.floats) { + case c.floatsCur < len(c.series.floats): c.curValType = chunkenc.ValFloat - } else if c.histogramsCur < len(c.series.histograms) { + case c.histogramsCur < len(c.series.histograms): c.curValType = getHistogramValType(&c.series.histograms[c.histogramsCur]) } - return c.curValType } @@ -514,26 +514,25 @@ func (c *concreteSeriesIterator) Next() chunkenc.ValueType { peekHistTS = c.series.histograms[c.histogramsCur+1].Timestamp } c.curValType = chunkenc.ValNone - - if peekFloatTS < peekHistTS { + switch { + case peekFloatTS < peekHistTS: c.floatsCur++ c.curValType = chunkenc.ValFloat - } else if peekHistTS < peekFloatTS { + case peekHistTS < peekFloatTS: c.histogramsCur++ c.curValType = chunkenc.ValHistogram - } else if peekFloatTS == noTS && peekHistTS == noTS { + case peekFloatTS == noTS && peekHistTS == noTS: // This only happens when the iterator is exhausted; we set the cursors off the end to prevent // Seek() from returning anything afterwards. c.floatsCur = len(c.series.floats) c.histogramsCur = len(c.series.histograms) - } else { + default: // Prefer float samples to histogram samples if there's a conflict. We advance the cursor for histograms // anyway otherwise the histogram sample will get selected on the next call to Next(). 
c.floatsCur++ c.histogramsCur++ c.curValType = chunkenc.ValFloat } - return c.curValType } diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 62bd17a66..0fe6d0698 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -609,7 +609,7 @@ outer: t.metrics.enqueueRetriesTotal.Inc() time.Sleep(time.Duration(backoff)) - backoff = backoff * 2 + backoff *= 2 // It is reasonable to use t.cfg.MaxBackoff here, as if we have hit // the full backoff we are likely waiting for external resources. if backoff > t.cfg.MaxBackoff { @@ -660,7 +660,7 @@ outer: t.metrics.enqueueRetriesTotal.Inc() time.Sleep(time.Duration(backoff)) - backoff = backoff * 2 + backoff *= 2 if backoff > t.cfg.MaxBackoff { backoff = t.cfg.MaxBackoff } @@ -707,7 +707,7 @@ outer: t.metrics.enqueueRetriesTotal.Inc() time.Sleep(time.Duration(backoff)) - backoff = backoff * 2 + backoff *= 2 if backoff > t.cfg.MaxBackoff { backoff = t.cfg.MaxBackoff } @@ -754,7 +754,7 @@ outer: t.metrics.enqueueRetriesTotal.Inc() time.Sleep(time.Duration(backoff)) - backoff = backoff * 2 + backoff *= 2 if backoff > t.cfg.MaxBackoff { backoff = t.cfg.MaxBackoff } diff --git a/template/template.go b/template/template.go index d61a880a2..01f6ec9a8 100644 --- a/template/template.go +++ b/template/template.go @@ -421,7 +421,7 @@ func (te Expander) ExpandHTML(templateFiles []string) (result string, resultErr } } }() - + //nolint:unconvert // Before Go 1.19 conversion from text_template to html_template is mandatory tmpl := html_template.New(te.name).Funcs(html_template.FuncMap(te.funcMap)) tmpl.Option(te.options...) 
tmpl.Funcs(html_template.FuncMap{ diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index f654fdb90..e284e1b77 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -739,8 +739,7 @@ func TestStorage_DuplicateExemplarsIgnored(t *testing.T) { var dec record.Decoder for r.Next() { rec := r.Record() - switch dec.Type(rec) { - case record.Exemplars: + if dec.Type(rec) == record.Exemplars { var exemplars []record.RefExemplar exemplars, err = dec.Exemplars(rec, exemplars) require.NoError(t, err) diff --git a/tsdb/block_test.go b/tsdb/block_test.go index 49a997fc5..e9dc1a9d0 100644 --- a/tsdb/block_test.go +++ b/tsdb/block_test.go @@ -630,7 +630,7 @@ func genHistogramSeries(totalSeries, labelCount int, mint, maxt, step int64, flo {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, }, - PositiveBuckets: []int64{int64(ts + 1), 1, -1, 0}, + PositiveBuckets: []int64{ts + 1, 1, -1, 0}, } if ts != mint { // By setting the counter reset hint to "no counter @@ -669,7 +669,7 @@ func genHistogramAndFloatSeries(totalSeries, labelCount int, mint, maxt, step in {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, }, - PositiveBuckets: []int64{int64(ts + 1), 1, -1, 0}, + PositiveBuckets: []int64{ts + 1, 1, -1, 0}, } if count > 1 && count%5 != 1 { // Same rationale for this as above in diff --git a/tsdb/chunkenc/bstream.go b/tsdb/chunkenc/bstream.go index 60531023b..7b17f4686 100644 --- a/tsdb/chunkenc/bstream.go +++ b/tsdb/chunkenc/bstream.go @@ -182,7 +182,7 @@ func (b *bstreamReader) readBits(nbits uint8) (uint64, error) { } bitmask = (uint64(1) << nbits) - 1 - v = v | ((b.buffer >> (b.valid - nbits)) & bitmask) + v |= ((b.buffer >> (b.valid - nbits)) & bitmask) b.valid -= nbits return v, nil @@ -242,13 +242,13 @@ func (b *bstreamReader) loadNextBuffer(nbits uint8) bool { if b.streamOffset+nbytes == len(b.stream) { // There can be concurrent writes happening on the very last byte // of the stream, so use the copy we took at initialization time. 
- buffer = buffer | uint64(b.last) + buffer |= uint64(b.last) // Read up to the byte before skip = 1 } for i := 0; i < nbytes-skip; i++ { - buffer = buffer | (uint64(b.stream[b.streamOffset+i]) << uint(8*(nbytes-i-1))) + buffer |= (uint64(b.stream[b.streamOffset+i]) << uint(8*(nbytes-i-1))) } b.buffer = buffer diff --git a/tsdb/chunkenc/float_histogram.go b/tsdb/chunkenc/float_histogram.go index b462c6d9f..6dd08a31c 100644 --- a/tsdb/chunkenc/float_histogram.go +++ b/tsdb/chunkenc/float_histogram.go @@ -785,7 +785,7 @@ func (it *floatHistogramIterator) Next() ValueType { it.err = err return ValNone } - it.tDelta = it.tDelta + tDod + it.tDelta += tDod it.t += it.tDelta if ok := it.readXor(&it.cnt.value, &it.cnt.leading, &it.cnt.trailing); !ok { diff --git a/tsdb/chunkenc/histogram.go b/tsdb/chunkenc/histogram.go index 7b6a9cacb..866fae36f 100644 --- a/tsdb/chunkenc/histogram.go +++ b/tsdb/chunkenc/histogram.go @@ -875,7 +875,7 @@ func (it *histogramIterator) Next() ValueType { it.err = err return ValNone } - it.tDelta = it.tDelta + tDod + it.tDelta += tDod it.t += it.tDelta cntDod, err := readVarbitInt(&it.br) @@ -883,7 +883,7 @@ func (it *histogramIterator) Next() ValueType { it.err = err return ValNone } - it.cntDelta = it.cntDelta + cntDod + it.cntDelta += cntDod it.cnt = uint64(int64(it.cnt) + it.cntDelta) zcntDod, err := readVarbitInt(&it.br) @@ -891,7 +891,7 @@ func (it *histogramIterator) Next() ValueType { it.err = err return ValNone } - it.zCntDelta = it.zCntDelta + zcntDod + it.zCntDelta += zcntDod it.zCnt = uint64(int64(it.zCnt) + it.zCntDelta) ok := it.readSum() diff --git a/tsdb/chunkenc/varbit.go b/tsdb/chunkenc/varbit.go index b3b14cf41..449f9fbac 100644 --- a/tsdb/chunkenc/varbit.go +++ b/tsdb/chunkenc/varbit.go @@ -122,7 +122,7 @@ func readVarbitInt(b *bstreamReader) (int64, error) { } if bits > (1 << (sz - 1)) { // Or something. 
- bits = bits - (1 << sz) + bits -= (1 << sz) } val = int64(bits) } diff --git a/tsdb/chunkenc/xor.go b/tsdb/chunkenc/xor.go index 2fa2f613c..8ca04502a 100644 --- a/tsdb/chunkenc/xor.go +++ b/tsdb/chunkenc/xor.go @@ -163,15 +163,15 @@ func (a *xorAppender) AppendFloatHistogram(t int64, h *histogram.FloatHistogram) func (a *xorAppender) Append(t int64, v float64) { var tDelta uint64 num := binary.BigEndian.Uint16(a.b.bytes()) - - if num == 0 { + switch { + case num == 0: buf := make([]byte, binary.MaxVarintLen64) for _, b := range buf[:binary.PutVarint(buf, t)] { a.b.writeByte(b) } a.b.writeBits(math.Float64bits(v), 64) - } else if num == 1 { + case num == 1: tDelta = uint64(t - a.t) buf := make([]byte, binary.MaxVarintLen64) @@ -181,7 +181,7 @@ func (a *xorAppender) Append(t int64, v float64) { a.writeVDelta(v) - } else { + default: tDelta = uint64(t - a.t) dod := int64(tDelta - a.tDelta) @@ -321,7 +321,7 @@ func (it *xorIterator) Next() ValueType { return ValNone } it.tDelta = tDelta - it.t = it.t + int64(it.tDelta) + it.t += int64(it.tDelta) return it.readValue() } @@ -384,7 +384,7 @@ func (it *xorIterator) Next() ValueType { } it.tDelta = uint64(int64(it.tDelta) + dod) - it.t = it.t + int64(it.tDelta) + it.t += int64(it.tDelta) return it.readValue() } diff --git a/tsdb/chunks/head_chunks_test.go b/tsdb/chunks/head_chunks_test.go index ac89ae3e5..20a4c2064 100644 --- a/tsdb/chunks/head_chunks_test.go +++ b/tsdb/chunks/head_chunks_test.go @@ -503,10 +503,10 @@ func createChunkDiskMapper(t *testing.T, dir string) *ChunkDiskMapper { func randomChunk(t *testing.T) chunkenc.Chunk { chunk := chunkenc.NewXORChunk() - len := rand.Int() % 120 + length := rand.Int() % 120 app, err := chunk.Appender() require.NoError(t, err) - for i := 0; i < len; i++ { + for i := 0; i < length; i++ { app.Append(rand.Int63(), rand.Float64()) } return chunk diff --git a/tsdb/compact.go b/tsdb/compact.go index b2d412375..7c061b0bb 100644 --- a/tsdb/compact.go +++ b/tsdb/compact.go @@ -44,7 
+44,7 @@ func ExponentialBlockRanges(minSize int64, steps, stepSize int) []int64 { curRange := minSize for i := 0; i < steps; i++ { ranges = append(ranges, curRange) - curRange = curRange * int64(stepSize) + curRange *= int64(stepSize) } return ranges diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index 6a7e6ea68..5a9eadeda 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -1452,12 +1452,6 @@ func TestSparseHistogramSpaceSavings(t *testing.T) { {100, 15, 3, 5}, {100, 50, 3, 3}, {100, 100, 3, 2}, - //{1000, 15, 1, 0}, - //{1000, 50, 1, 0}, - //{1000, 100, 1, 0}, - //{1000, 15, 3, 5}, - //{1000, 50, 3, 3}, - //{1000, 100, 3, 2}, } type testSummary struct { diff --git a/tsdb/db.go b/tsdb/db.go index 659251c3c..a10f07b1e 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -260,7 +260,7 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics { Help: "Size of symbol table in memory for loaded blocks", }, func() float64 { db.mtx.RLock() - blocks := db.blocks[:] + blocks := db.blocks db.mtx.RUnlock() symTblSize := uint64(0) for _, b := range blocks { @@ -1186,7 +1186,7 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID } }() - for t := blockSize * (oooHeadMint / blockSize); t <= oooHeadMaxt; t = t + blockSize { + for t := blockSize * (oooHeadMint / blockSize); t <= oooHeadMaxt; t += blockSize { mint, maxt := t, t+blockSize // Block intervals are half-open: [b.MinTime, b.MaxTime). Block intervals are always +1 than the total samples it includes. uid, err := db.compactor.Write(dest, oooHead.CloneForTimeRange(mint, maxt-1), mint, maxt, nil) @@ -1508,7 +1508,7 @@ func BeyondSizeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struc blocksSize := db.Head().Size() for i, block := range blocks { blocksSize += block.Size() - if blocksSize > int64(db.opts.MaxBytes) { + if blocksSize > db.opts.MaxBytes { // Add this and all following blocks for deletion. 
for _, b := range blocks[i:] { deletable[b.meta.ULID] = struct{}{} diff --git a/tsdb/db_test.go b/tsdb/db_test.go index c54fccf6f..7e1f89a95 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -1076,7 +1076,7 @@ func TestWALSegmentSizeOptions(t *testing.T) { dbDir := db.Dir() require.NoError(t, db.Close()) - testFunc(dbDir, int(opts.WALSegmentSize)) + testFunc(dbDir, opts.WALSegmentSize) }) } } @@ -2996,7 +2996,7 @@ func TestCompactHead(t *testing.T) { series = seriesSet.At().Iterator(series) for series.Next() == chunkenc.ValFloat { time, val := series.At() - actSamples = append(actSamples, sample{int64(time), val, nil, nil}) + actSamples = append(actSamples, sample{time, val, nil, nil}) } require.NoError(t, series.Err()) } diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index 5ba3567e4..ad3b2ef39 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -115,17 +115,17 @@ func NewExemplarMetrics(reg prometheus.Registerer) *ExemplarMetrics { // 1GB of extra memory, accounting for the fact that this is heap allocated space. // If len <= 0, then the exemplar storage is essentially a noop storage but can later be // resized to store exemplars. 
-func NewCircularExemplarStorage(len int64, m *ExemplarMetrics) (ExemplarStorage, error) { - if len < 0 { - len = 0 +func NewCircularExemplarStorage(length int64, m *ExemplarMetrics) (ExemplarStorage, error) { + if length < 0 { + length = 0 } c := &CircularExemplarStorage{ - exemplars: make([]*circularBufferEntry, len), - index: make(map[string]*indexEntry, len/estimatedExemplarsPerSeries), + exemplars: make([]*circularBufferEntry, length), + index: make(map[string]*indexEntry, length/estimatedExemplarsPerSeries), metrics: m, } - c.metrics.maxExemplars.Set(float64(len)) + c.metrics.maxExemplars.Set(float64(length)) return c, nil } @@ -151,7 +151,7 @@ func (ce *CircularExemplarStorage) Querier(_ context.Context) (storage.ExemplarQ func (ce *CircularExemplarStorage) Select(start, end int64, matchers ...[]*labels.Matcher) ([]exemplar.QueryResult, error) { ret := make([]exemplar.QueryResult, 0) - if len(ce.exemplars) <= 0 { + if len(ce.exemplars) == 0 { return ret, nil } @@ -219,7 +219,7 @@ func (ce *CircularExemplarStorage) ValidateExemplar(l labels.Labels, e exemplar. // Not thread safe. The append parameters tells us whether this is an external validation, or internal // as a result of an AddExemplar call, in which case we should update any relevant metrics. 
func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemplar, append bool) error { - if len(ce.exemplars) <= 0 { + if len(ce.exemplars) == 0 { return storage.ErrExemplarsDisabled } @@ -334,7 +334,7 @@ func (ce *CircularExemplarStorage) migrate(entry *circularBufferEntry) { } func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemplar) error { - if len(ce.exemplars) <= 0 { + if len(ce.exemplars) == 0 { return storage.ErrExemplarsDisabled } diff --git a/tsdb/head.go b/tsdb/head.go index 4696884f2..ca953b175 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -1453,7 +1453,7 @@ func (h *Head) Delete(mint, maxt int64, ms ...*labels.Matcher) error { } } for _, s := range stones { - h.tombstones.AddInterval(storage.SeriesRef(s.Ref), s.Intervals[0]) + h.tombstones.AddInterval(s.Ref, s.Intervals[0]) } return nil diff --git a/tsdb/head_test.go b/tsdb/head_test.go index e80c197b2..9326ddbe1 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -3005,7 +3005,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { hists = tsdbutil.GenerateTestHistograms(numHistograms) } for _, h := range hists { - h.Count = h.Count * 2 + h.Count *= 2 h.NegativeSpans = h.PositiveSpans h.NegativeBuckets = h.PositiveBuckets _, err := app.AppendHistogram(0, s1, ts, h, nil) @@ -3028,7 +3028,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { hists = tsdbutil.GenerateTestFloatHistograms(numHistograms) } for _, h := range hists { - h.Count = h.Count * 2 + h.Count *= 2 h.NegativeSpans = h.PositiveSpans h.NegativeBuckets = h.PositiveBuckets _, err := app.AppendHistogram(0, s1, ts, nil, h) @@ -3069,26 +3069,26 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { } for _, h := range hists { ts++ - h.Count = h.Count * 2 + h.Count *= 2 h.NegativeSpans = h.PositiveSpans h.NegativeBuckets = h.PositiveBuckets - _, err := app.AppendHistogram(0, s2, int64(ts), h, nil) + _, err := app.AppendHistogram(0, s2, ts, h, nil) require.NoError(t, err) eh := h.Copy() 
if !gauge && ts > 30 && (ts-10)%20 == 1 { // Need "unknown" hint after float sample. eh.CounterResetHint = histogram.UnknownCounterReset } - exp[k2] = append(exp[k2], sample{t: int64(ts), h: eh}) + exp[k2] = append(exp[k2], sample{t: ts, h: eh}) if ts%20 == 0 { require.NoError(t, app.Commit()) app = head.Appender(context.Background()) // Add some float. for i := 0; i < 10; i++ { ts++ - _, err := app.Append(0, s2, int64(ts), float64(ts)) + _, err := app.Append(0, s2, ts, float64(ts)) require.NoError(t, err) - exp[k2] = append(exp[k2], sample{t: int64(ts), f: float64(ts)}) + exp[k2] = append(exp[k2], sample{t: ts, f: float64(ts)}) } require.NoError(t, app.Commit()) app = head.Appender(context.Background()) @@ -3106,26 +3106,26 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { } for _, h := range hists { ts++ - h.Count = h.Count * 2 + h.Count *= 2 h.NegativeSpans = h.PositiveSpans h.NegativeBuckets = h.PositiveBuckets - _, err := app.AppendHistogram(0, s2, int64(ts), nil, h) + _, err := app.AppendHistogram(0, s2, ts, nil, h) require.NoError(t, err) eh := h.Copy() if !gauge && ts > 30 && (ts-10)%20 == 1 { // Need "unknown" hint after float sample. eh.CounterResetHint = histogram.UnknownCounterReset } - exp[k2] = append(exp[k2], sample{t: int64(ts), fh: eh}) + exp[k2] = append(exp[k2], sample{t: ts, fh: eh}) if ts%20 == 0 { require.NoError(t, app.Commit()) app = head.Appender(context.Background()) // Add some float. 
for i := 0; i < 10; i++ { ts++ - _, err := app.Append(0, s2, int64(ts), float64(ts)) + _, err := app.Append(0, s2, ts, float64(ts)) require.NoError(t, err) - exp[k2] = append(exp[k2], sample{t: int64(ts), f: float64(ts)}) + exp[k2] = append(exp[k2], sample{t: ts, f: float64(ts)}) } require.NoError(t, app.Commit()) app = head.Appender(context.Background()) @@ -4495,11 +4495,12 @@ func TestHistogramValidation(t *testing.T) { } err = ValidateFloatHistogram(tc.h.ToFloat()) - if tc.errMsgFloat != "" { + switch { + case tc.errMsgFloat != "": require.ErrorContains(t, err, tc.errMsgFloat) - } else if tc.errMsg != "" { + case tc.errMsg != "": require.ErrorContains(t, err, tc.errMsg) - } else { + default: require.NoError(t, err) } }) diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 6a8a30d5a..b3537d060 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -299,7 +299,7 @@ Outer: unknownRefs.Inc() continue } - h.tombstones.AddInterval(storage.SeriesRef(s.Ref), itv) + h.tombstones.AddInterval(s.Ref, itv) } } tstonesPool.Put(v) @@ -382,7 +382,7 @@ Outer: floatHistogramsPool.Put(v) case []record.RefMetadata: for _, m := range v { - s := h.series.getByID(chunks.HeadSeriesRef(m.Ref)) + s := h.series.getByID(m.Ref) if s == nil { unknownMetadataRefs.Inc() continue diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 9f584ee82..50a701d3a 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -536,7 +536,7 @@ func (w *Writer) finishSymbols() error { // Write out the length and symbol count. 
w.buf1.Reset() w.buf1.PutBE32int(int(symbolTableSize)) - w.buf1.PutBE32int(int(w.numSymbols)) + w.buf1.PutBE32int(w.numSymbols) if err := w.writeAt(w.buf1.Get(), w.toc.Symbols); err != nil { return err } diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index b55d70df0..c57c085ec 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -561,10 +561,8 @@ func newMergedPostings(p []Postings) (m *mergedPostings, nonEmpty bool) { // NOTE: mergedPostings struct requires the user to issue an initial Next. if it.Next() { ph = append(ph, it) - } else { - if it.Err() != nil { - return &mergedPostings{err: it.Err()}, true - } + } else if it.Err() != nil { + return &mergedPostings{err: it.Err()}, true } } @@ -699,15 +697,16 @@ func (rp *removedPostings) Next() bool { } fcur, rcur := rp.full.At(), rp.remove.At() - if fcur < rcur { + switch { + case fcur < rcur: rp.cur = fcur rp.fok = rp.full.Next() return true - } else if rcur < fcur { + case rcur < fcur: // Forward the remove postings to the right position. rp.rok = rp.remove.Seek(fcur) - } else { + default: // Skip the current posting. rp.fok = rp.full.Next() } diff --git a/tsdb/index/postingsstats.go b/tsdb/index/postingsstats.go index 5e5880720..6b29bddab 100644 --- a/tsdb/index/postingsstats.go +++ b/tsdb/index/postingsstats.go @@ -31,10 +31,10 @@ type maxHeap struct { Items []Stat } -func (m *maxHeap) init(len int) { - m.maxLength = len +func (m *maxHeap) init(length int) { + m.maxLength = length m.minValue = math.MaxUint64 - m.Items = make([]Stat, 0, len) + m.Items = make([]Stat, 0, length) } func (m *maxHeap) push(item Stat) { diff --git a/tsdb/isolation.go b/tsdb/isolation.go index 74d63c6af..401e5885a 100644 --- a/tsdb/isolation.go +++ b/tsdb/isolation.go @@ -254,7 +254,7 @@ func (txr *txRing) add(appendID uint64) { if txr.txIDCount == len(txr.txIDs) { // Ring buffer is full, expand by doubling. 
newRing := make([]uint64, txr.txIDCount*2) - idx := copy(newRing[:], txr.txIDs[txr.txIDFirst:]) + idx := copy(newRing, txr.txIDs[txr.txIDFirst:]) copy(newRing[idx:], txr.txIDs[:txr.txIDFirst]) txr.txIDs = newRing txr.txIDFirst = 0 diff --git a/tsdb/querier.go b/tsdb/querier.go index 4b3144c71..8806c7e73 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -239,18 +239,20 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings, } for _, m := range ms { - if m.Name == "" && m.Value == "" { // Special-case for AllPostings, used in tests at least. + switch { + case m.Name == "" && m.Value == "": // Special-case for AllPostings, used in tests at least. k, v := index.AllPostingsKey() allPostings, err := ix.Postings(k, v) if err != nil { return nil, err } its = append(its, allPostings) - } else if labelMustBeSet[m.Name] { + case labelMustBeSet[m.Name]: // If this matcher must be non-empty, we can be smarter. matchesEmpty := m.Matches("") isNot := m.Type == labels.MatchNotEqual || m.Type == labels.MatchNotRegexp - if isNot && matchesEmpty { // l!="foo" + switch { + case isNot && matchesEmpty: // l!="foo" // If the label can't be empty and is a Not and the inner matcher // doesn't match empty, then subtract it out at the end. inverse, err := m.Inverse() @@ -263,7 +265,7 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings, return nil, err } notIts = append(notIts, it) - } else if isNot && !matchesEmpty { // l!="" + case isNot && !matchesEmpty: // l!="" // If the label can't be empty and is a Not, but the inner matcher can // be empty we need to use inversePostingsForMatcher. inverse, err := m.Inverse() @@ -279,7 +281,7 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings, return index.EmptyPostings(), nil } its = append(its, it) - } else { // l="a" + default: // l="a" // Non-Not matcher, use normal postingsForMatcher. 
it, err := postingsForMatcher(ix, m) if err != nil { @@ -290,7 +292,7 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings, } its = append(its, it) } - } else { // l="" + default: // l="" // If the matchers for a labelname selects an empty value, it selects all // the series which don't have the label name set too. See: // https://github.com/prometheus/prometheus/issues/3575 and @@ -965,24 +967,24 @@ func (m *mergedStringIter) Next() bool { if (!m.aok && !m.bok) || (m.Err() != nil) { return false } - - if !m.aok { + switch { + case !m.aok: m.cur = m.b.At() m.bok = m.b.Next() m.err = m.b.Err() - } else if !m.bok { + case !m.bok: m.cur = m.a.At() m.aok = m.a.Next() m.err = m.a.Err() - } else if m.b.At() > m.a.At() { + case m.b.At() > m.a.At(): m.cur = m.a.At() m.aok = m.a.Next() m.err = m.a.Err() - } else if m.a.At() > m.b.At() { + case m.a.At() > m.b.At(): m.cur = m.b.At() m.bok = m.b.Next() m.err = m.b.Err() - } else { // Equal. + default: // Equal. m.cur = m.b.At() m.aok = m.a.Next() m.err = m.a.Err() diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index fa3dd2418..8f52fff28 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -113,7 +113,7 @@ func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkRe var chunkRef chunks.ChunkRef for i, s := range tc { - i = i + 1 // 0 is not a valid posting. + i++ // 0 is not a valid posting. 
metas := make([]chunks.Meta, 0, len(s.chunks)) for _, chk := range s.chunks { if chk[0].t < blockMint { @@ -2012,7 +2012,7 @@ func BenchmarkQueries(b *testing.B) { for x := 0; x <= 10; x++ { block, err := OpenBlock(nil, createBlock(b, dir, series), nil) require.NoError(b, err) - q, err := NewBlockQuerier(block, 1, int64(nSamples)) + q, err := NewBlockQuerier(block, 1, nSamples) require.NoError(b, err) qs = append(qs, q) } diff --git a/tsdb/wal.go b/tsdb/wal.go index e0bc1ec69..a9af76d15 100644 --- a/tsdb/wal.go +++ b/tsdb/wal.go @@ -90,7 +90,7 @@ func newWalMetrics(r prometheus.Registerer) *walMetrics { // WAL is a write ahead log that can log new series labels and samples. // It must be completely read before new entries are logged. // -// DEPRECATED: use wlog pkg combined with the record codex instead. +// Deprecated: use wlog pkg combined with the record codex instead. type WAL interface { Reader() WALReader LogSeries([]record.RefSeries) error @@ -147,7 +147,7 @@ func newCRC32() hash.Hash32 { // SegmentWAL is a write ahead log for series data. // -// DEPRECATED: use wlog pkg combined with the record coders instead. +// Deprecated: use wlog pkg combined with the record coders instead. 
type SegmentWAL struct { mtx sync.Mutex metrics *walMetrics diff --git a/tsdb/wlog/wlog_test.go b/tsdb/wlog/wlog_test.go index ed8a9df2e..7f9133a76 100644 --- a/tsdb/wlog/wlog_test.go +++ b/tsdb/wlog/wlog_test.go @@ -428,10 +428,10 @@ func TestLogPartialWrite(t *testing.T) { faultyRecord: pageSize / (recordHeaderSize + len(record)), }, // TODO the current implementation suffers this: - //"partial write when logging a record overlapping two pages": { + // "partial write when logging a record overlapping two pages": { // numRecords: (pageSize / (recordHeaderSize + len(record))) + 10, // faultyRecord: pageSize/(recordHeaderSize+len(record)) + 1, - //}, + // }, } for testName, testData := range tests { diff --git a/util/runtime/limits_default.go b/util/runtime/limits_default.go index c3e0b4701..1588a93c7 100644 --- a/util/runtime/limits_default.go +++ b/util/runtime/limits_default.go @@ -39,7 +39,7 @@ func getLimits(resource int, unit string) string { if err != nil { panic("syscall.Getrlimit failed: " + err.Error()) } - return fmt.Sprintf("(soft=%s, hard=%s)", limitToString(uint64(rlimit.Cur), unit), limitToString(uint64(rlimit.Max), unit)) + return fmt.Sprintf("(soft=%s, hard=%s)", limitToString(rlimit.Cur, unit), limitToString(rlimit.Max, unit)) } // FdLimits returns the soft and hard limits for file descriptors. 
diff --git a/util/runtime/statfs_default.go b/util/runtime/statfs_default.go index f850f2cd6..2e31d93fc 100644 --- a/util/runtime/statfs_default.go +++ b/util/runtime/statfs_default.go @@ -72,11 +72,13 @@ func Statfs(path string) string { var fs syscall.Statfs_t err := syscall.Statfs(path, &fs) + //nolint:unconvert // This ensure Type format on all Platforms + localType := int64(fs.Type) if err != nil { - return strconv.FormatInt(int64(fs.Type), 16) + return strconv.FormatInt(localType, 16) } - if fsType, ok := fsTypes[int64(fs.Type)]; ok { + if fsType, ok := fsTypes[localType]; ok { return fsType } - return strconv.FormatInt(int64(fs.Type), 16) + return strconv.FormatInt(localType, 16) } diff --git a/web/api/v1/api.go b/web/api/v1/api.go index aeea87ca7..2e0016fd2 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -243,7 +243,7 @@ func NewAPI( remoteReadConcurrencyLimit int, remoteReadMaxBytesInFrame int, isAgent bool, - CORSOrigin *regexp.Regexp, + corsOrigin *regexp.Regexp, runtimeInfo func() (RuntimeInfo, error), buildInfo *PrometheusVersion, gatherer prometheus.Gatherer, @@ -269,7 +269,7 @@ func NewAPI( enableAdmin: enableAdmin, rulesRetriever: rr, logger: logger, - CORSOrigin: CORSOrigin, + CORSOrigin: corsOrigin, runtimeInfo: runtimeInfo, buildInfo: buildInfo, gatherer: gatherer, diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index efce04221..27cbab1b3 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -2791,7 +2791,7 @@ func TestRespondSuccess(t *testing.T) { } var res response - if err = json.Unmarshal([]byte(body), &res); err != nil { + if err = json.Unmarshal(body, &res); err != nil { t.Fatalf("Error unmarshaling JSON body: %s", err) } @@ -2827,7 +2827,7 @@ func TestRespondError(t *testing.T) { } var res response - if err = json.Unmarshal([]byte(body), &res); err != nil { + if err = json.Unmarshal(body, &res); err != nil { t.Fatalf("Error unmarshaling JSON body: %s", err) } diff --git a/web/web.go b/web/web.go index 
9d63094f6..f4f64163d 100644 --- a/web/web.go +++ b/web/web.go @@ -719,9 +719,9 @@ func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) { } if h.options.TSDBMaxBytes != 0 { if status.StorageRetention != "" { - status.StorageRetention = status.StorageRetention + " or " + status.StorageRetention += " or " } - status.StorageRetention = status.StorageRetention + h.options.TSDBMaxBytes.String() + status.StorageRetention += h.options.TSDBMaxBytes.String() } metrics, err := h.gatherer.Gather() From 052993414a60254d71bd0e19cf3ee648ca697728 Mon Sep 17 00:00:00 2001 From: Justin Lei Date: Wed, 12 Apr 2023 09:48:35 -0700 Subject: [PATCH 075/632] Add storage.tsdb.samples-per-chunk flag Signed-off-by: Justin Lei --- cmd/prometheus/main.go | 5 +++++ tsdb/db.go | 5 +++++ tsdb/head.go | 19 +++++++++++++------ tsdb/head_append.go | 7 ++----- tsdb/head_test.go | 12 ++++++------ 5 files changed, 31 insertions(+), 17 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index f4f6af20d..cafe2f819 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -336,6 +336,9 @@ func main() { serverOnlyFlag(a, "storage.tsdb.head-chunks-write-queue-size", "Size of the queue through which head chunks are written to the disk to be m-mapped, 0 disables the queue completely. Experimental."). Default("0").IntVar(&cfg.tsdb.HeadChunksWriteQueueSize) + serverOnlyFlag(a, "storage.tsdb.samples-per-chunk", "Target number of samples per chunk."). + Default("120").Hidden().IntVar(&cfg.tsdb.SamplesPerChunk) + agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage."). 
Default("data-agent/").StringVar(&cfg.agentStoragePath) @@ -1542,6 +1545,7 @@ type tsdbOptions struct { NoLockfile bool WALCompression bool HeadChunksWriteQueueSize int + SamplesPerChunk int StripeSize int MinBlockDuration model.Duration MaxBlockDuration model.Duration @@ -1562,6 +1566,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { AllowOverlappingCompaction: true, WALCompression: opts.WALCompression, HeadChunksWriteQueueSize: opts.HeadChunksWriteQueueSize, + SamplesPerChunk: opts.SamplesPerChunk, StripeSize: opts.StripeSize, MinBlockDuration: int64(time.Duration(opts.MinBlockDuration) / time.Millisecond), MaxBlockDuration: int64(time.Duration(opts.MaxBlockDuration) / time.Millisecond), diff --git a/tsdb/db.go b/tsdb/db.go index 659251c3c..e0e9c69f0 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -78,6 +78,7 @@ func DefaultOptions() *Options { NoLockfile: false, AllowOverlappingCompaction: true, WALCompression: false, + SamplesPerChunk: DefaultSamplesPerChunk, StripeSize: DefaultStripeSize, HeadChunksWriteBufferSize: chunks.DefaultWriteBufferSize, IsolationDisabled: defaultIsolationDisabled, @@ -149,6 +150,9 @@ type Options struct { // HeadChunksWriteQueueSize configures the size of the chunk write queue used in the head chunks mapper. HeadChunksWriteQueueSize int + // SamplesPerChunk configures the target number of samples per chunk. + SamplesPerChunk int + // SeriesLifecycleCallback specifies a list of callbacks that will be called during a lifecycle of a series. // It is always a no-op in Prometheus and mainly meant for external users who import TSDB. 
SeriesLifecycleCallback SeriesLifecycleCallback @@ -778,6 +782,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs headOpts.ChunkPool = db.chunkPool headOpts.ChunkWriteBufferSize = opts.HeadChunksWriteBufferSize headOpts.ChunkWriteQueueSize = opts.HeadChunksWriteQueueSize + headOpts.SamplesPerChunk = opts.SamplesPerChunk headOpts.StripeSize = opts.StripeSize headOpts.SeriesCallback = opts.SeriesLifecycleCallback headOpts.EnableExemplarStorage = opts.EnableExemplarStorage diff --git a/tsdb/head.go b/tsdb/head.go index af8175cd0..b4df1b2d0 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -150,6 +150,8 @@ type HeadOptions struct { ChunkWriteBufferSize int ChunkWriteQueueSize int + SamplesPerChunk int + // StripeSize sets the number of entries in the hash map, it must be a power of 2. // A larger StripeSize will allocate more memory up-front, but will increase performance when handling a large number of series. // A smaller StripeSize reduces the memory allocated, but can decrease performance with large number of series. @@ -169,6 +171,8 @@ type HeadOptions struct { const ( // DefaultOutOfOrderCapMax is the default maximum size of an in-memory out-of-order chunk. DefaultOutOfOrderCapMax int64 = 32 + // DefaultSamplesPerChunk provides a default target number of samples per chunk. 
+ DefaultSamplesPerChunk = 120 ) func DefaultHeadOptions() *HeadOptions { @@ -178,6 +182,7 @@ func DefaultHeadOptions() *HeadOptions { ChunkPool: chunkenc.NewPool(), ChunkWriteBufferSize: chunks.DefaultWriteBufferSize, ChunkWriteQueueSize: chunks.DefaultWriteQueueSize, + SamplesPerChunk: DefaultSamplesPerChunk, StripeSize: DefaultStripeSize, SeriesCallback: &noopSeriesLifecycleCallback{}, IsolationDisabled: defaultIsolationDisabled, @@ -1607,7 +1612,7 @@ func (h *Head) getOrCreate(hash uint64, lset labels.Labels) (*memSeries, bool, e func (h *Head) getOrCreateWithID(id chunks.HeadSeriesRef, hash uint64, lset labels.Labels) (*memSeries, bool, error) { s, created, err := h.series.getOrSet(hash, lset, func() *memSeries { - return newMemSeries(lset, id, h.opts.IsolationDisabled) + return newMemSeries(lset, id, h.opts.IsolationDisabled, h.opts.SamplesPerChunk) }) if err != nil { return nil, false, err @@ -1915,7 +1920,8 @@ type memSeries struct { mmMaxTime int64 // Max time of any mmapped chunk, only used during WAL replay. - nextAt int64 // Timestamp at which to cut the next chunk. + samplesPerChunk int // Target number of samples per chunk. + nextAt int64 // Timestamp at which to cut the next chunk. // We keep the last value here (in addition to appending it to the chunk) so we can check for duplicates. lastValue float64 @@ -1943,11 +1949,12 @@ type memSeriesOOOFields struct { firstOOOChunkID chunks.HeadChunkID // HeadOOOChunkID for oooMmappedChunks[0]. 
} -func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, isolationDisabled bool) *memSeries { +func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, isolationDisabled bool, samplesPerChunk int) *memSeries { s := &memSeries{ - lset: lset, - ref: id, - nextAt: math.MinInt64, + lset: lset, + ref: id, + nextAt: math.MinInt64, + samplesPerChunk: samplesPerChunk, } if !isolationDisabled { s.txs = newTxRing(4) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 46180051e..eb5b219ea 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -1324,9 +1324,6 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, func (s *memSeries) appendPreprocessor( t int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, ) (c *memChunk, sampleInOrder, chunkCreated bool) { - // The basis for this number can be found here: https://github.com/prometheus/prometheus/pull/12055 - const samplesPerChunk = 220 - c = s.head() if c == nil { @@ -1363,7 +1360,7 @@ func (s *memSeries) appendPreprocessor( // for this chunk that will try to make samples equally distributed within // the remaining chunks in the current chunk range. // At latest it must happen at the timestamp set when the chunk was cut. - if numSamples == samplesPerChunk/4 { + if numSamples == s.samplesPerChunk/4 { s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, s.nextAt) } // If numSamples > samplesPerChunk*2 then our previous prediction was invalid, @@ -1371,7 +1368,7 @@ func (s *memSeries) appendPreprocessor( // Since we assume that the rate is higher, we're being conservative and cutting at 2*samplesPerChunk // as we expect more chunks to come. // Note that next chunk will have its nextAt recalculated for the new rate. 
- if t >= s.nextAt || numSamples >= samplesPerChunk*2 { + if t >= s.nextAt || numSamples >= s.samplesPerChunk*2 { c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange) chunkCreated = true } diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 9a7220957..df48e592d 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -284,7 +284,7 @@ func BenchmarkLoadWAL(b *testing.B) { require.NoError(b, err) for k := 0; k < c.batches*c.seriesPerBatch; k++ { // Create one mmapped chunk per series, with one sample at the given time. - s := newMemSeries(labels.Labels{}, chunks.HeadSeriesRef(k)*101, defaultIsolationDisabled) + s := newMemSeries(labels.Labels{}, chunks.HeadSeriesRef(k)*101, defaultIsolationDisabled, DefaultSamplesPerChunk) s.append(c.mmappedChunkT, 42, 0, chunkDiskMapper, c.mmappedChunkT) s.mmapCurrentHeadChunk(chunkDiskMapper) } @@ -806,7 +806,7 @@ func TestMemSeries_truncateChunks(t *testing.T) { }, } - s := newMemSeries(labels.FromStrings("a", "b"), 1, defaultIsolationDisabled) + s := newMemSeries(labels.FromStrings("a", "b"), 1, defaultIsolationDisabled, DefaultSamplesPerChunk) for i := 0; i < 8000; i += 5 { ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange) @@ -1337,7 +1337,7 @@ func TestMemSeries_append(t *testing.T) { }() const chunkRange = 500 - s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled) + s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled, DefaultSamplesPerChunk) // Add first two samples at the very end of a chunk range and the next two // on and after it. 
@@ -1391,7 +1391,7 @@ func TestMemSeries_appendHistogram(t *testing.T) { }() chunkRange := int64(1000) - s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled) + s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled, DefaultSamplesPerChunk) histograms := tsdbutil.GenerateTestHistograms(4) histogramWithOneMoreBucket := histograms[3].Copy() @@ -1447,7 +1447,7 @@ func TestMemSeries_append_atVariableRate(t *testing.T) { }) chunkRange := DefaultBlockDuration - s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled) + s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled, DefaultSamplesPerChunk) // At this slow rate, we will fill the chunk in two block durations. slowRate := (DefaultBlockDuration * 2) / samplesPerChunk @@ -2609,7 +2609,7 @@ func TestIteratorSeekIntoBuffer(t *testing.T) { }() const chunkRange = 500 - s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled) + s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled, DefaultSamplesPerChunk) for i := 0; i < 7; i++ { ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange) From c3e6b8563109ab09ccea33283ae27bfe3121b77a Mon Sep 17 00:00:00 2001 From: Justin Lei Date: Thu, 13 Apr 2023 14:35:29 -0700 Subject: [PATCH 076/632] Reverse test changes Signed-off-by: Justin Lei --- storage/remote/read_handler_test.go | 135 ++++++++++++++-------------- tsdb/db_test.go | 12 +-- tsdb/head_test.go | 24 ++--- 3 files changed, 84 insertions(+), 87 deletions(-) diff --git a/storage/remote/read_handler_test.go b/storage/remote/read_handler_test.go index 0c186097a..261c28e21 100644 --- a/storage/remote/read_handler_test.go +++ b/storage/remote/read_handler_test.go @@ -202,18 +202,15 @@ func BenchmarkStreamReadEndpoint(b *testing.B) { } func TestStreamReadEndpoint(t *testing.T) { - // Note: samplesPerChunk is set to 220, but that isn't cleanly divisible by the chunkRange of 24 hrs and 1 min - // resolution used in this test so tsdb.computeChunkEndTime will put 240 
samples in each chunk. - // - // First with 239 samples; we expect 1 frame with 1 full chunk. - // Second with 241 samples; we expect 1 frame with 2 chunks. - // Third with 481 samples; we expect 1 frame with 2 chunks, and 1 frame with 1 chunk for the same series due to bytes limit. + // First with 120 samples. We expect 1 frame with 1 chunk. + // Second with 121 samples, We expect 1 frame with 2 chunks. + // Third with 241 samples. We expect 1 frame with 2 chunks, and 1 frame with 1 chunk for the same series due to bytes limit. // Fourth with 120 histogram samples. We expect 1 frame with 1 chunk. suite, err := promql.NewTest(t, ` load 1m - test_metric1{foo="bar1",baz="qux"} 0+100x239 - test_metric1{foo="bar2",baz="qux"} 0+100x240 - test_metric1{foo="bar3",baz="qux"} 0+100x480 + test_metric1{foo="bar1",baz="qux"} 0+100x119 + test_metric1{foo="bar2",baz="qux"} 0+100x120 + test_metric1{foo="bar3",baz="qux"} 0+100x240 `) require.NoError(t, err) defer suite.Close() @@ -231,8 +228,8 @@ func TestStreamReadEndpoint(t *testing.T) { } }, 1e6, 1, - // Labelset has 57 bytes. Full chunk in test data has roughly 440 bytes. This allows us to have at max 2 chunks in this test. - 57+880, + // Labelset has 57 bytes. Full chunk in test data has roughly 240 bytes. This allows us to have at max 2 chunks in this test. + 57+480, ) // Encode the request. 
@@ -248,19 +245,19 @@ func TestStreamReadEndpoint(t *testing.T) { matcher4, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test_histogram_metric1") require.NoError(t, err) - query1, err := ToQuery(0, 32460001, []*labels.Matcher{matcher1, matcher2}, &storage.SelectHints{ + query1, err := ToQuery(0, 14400001, []*labels.Matcher{matcher1, matcher2}, &storage.SelectHints{ Step: 1, Func: "avg", Start: 0, - End: 32460001, + End: 14400001, }) require.NoError(t, err) - query2, err := ToQuery(0, 32460001, []*labels.Matcher{matcher1, matcher3}, &storage.SelectHints{ + query2, err := ToQuery(0, 14400001, []*labels.Matcher{matcher1, matcher3}, &storage.SelectHints{ Step: 1, Func: "avg", Start: 0, - End: 32460001, + End: 14400001, }) require.NoError(t, err) @@ -319,8 +316,8 @@ func TestStreamReadEndpoint(t *testing.T) { Chunks: []prompb.Chunk{ { Type: prompb.Chunk_XOR, - MaxTimeMs: 14340000, - Data: []byte("\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xd4\x03\xc2|\x05\x94\x00\xc1\xac}\xe9z2\xd0O\xed\xb4n[\aΔ\xa3md\xf9\xd0\xfd\x1aPm\nS\x9d\x0eQ\xad\x06P\xbd\xa8\xbfʁZ\x03(3\xa0R\x87\xda\x05(\x0f\xa0ҁ\xda=(\x13\xa02\x83Z\a(w\xa02\x81Z\x0f(\x13\xb5\x97\xf4P\x1b@\xa5\a\xf4\nP\x1bC\xa5\x02t\x1eP+@e\x1e\xf4\x0ePk@e\x02t:P;A\xa5\x01\xf4\nS\xfb@\xa5\x06t\x06P+C\xe5\x02\xf4\x06Pk@\xe5\x1et\nP\x1bA\xa5\x03\xf4:P\x1b@\xa5\x06t\x0eZJ\xff\\\x85ˈ\u05f8\x0f\xe5+F[\xc8\xe7E)\xed\x14\xa1\xf6\xe2}(v\x8d(N\x83)և(ރ(V\xdaW\xf2\x82t4\xa0m\x05(\xffAJ\x06\xd0҂t\xfc\xa0\xad\x03(oA\xca:\xd02\x82t4\xa0\xed\xb0\xbfҀ\xfa\x05(=\xa0R\x87:\x03(\x15\xa0\xf2\x81z\x03(\xf5\xa0r\x83:\x05(\r\xa1ҁ\xfa\r(\r\xa0R\x9f:\a(5\xa02\x81z\x1f(\x15\xa02\x83:\x05(\xfd\xa0R\x80\xfa\r(\x1d\xa1ҁ:\x03(5\xa0r\xd6g\xf4\xb9\x8b\x97!\xabq\x1f\xcaN\xe0GJ\x1bE)\xfd\x14\xa1\xb6\xf2}(N\x8f(V\x83)އ(փ(N\xdcW҃\xb44\xa0}\x05(\xfbAJ\x19\xd02\x82\xb4\xfc\xa0\xbd\x03(kA\xca9\xd0R\x81\xb44\xa0\xfd\xb4\xbfҀ\xda\x05(3\xa0r\x87Z\x03(\x17\xa0\xf2\x81Z\x03(\xf3\xa0R\x83\xda\x05(\x0f\xa1ҁ\xda\r(\x13\xa02\x9fZ\a(7\xa02\x81Z\x1f(\x13\xa0Ҁ\xda
\x05(\xff\xa0R\x80\xda\r "), + MaxTimeMs: 7140000, + Data: []byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"), }, }, }, @@ -339,61 +336,61 @@ func TestStreamReadEndpoint(t *testing.T) { Chunks: []prompb.Chunk{ { Type: prompb.Chunk_XOR, - MaxTimeMs: 14340000, - Data: []byte("\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xd4\x03\xc2|\x05\x94\x00\xc1\xac}\xe9z2\xd0O\xed\xb4n[\aΔ\xa3md\xf9\xd0\xfd\x1aPm\nS\x9d\x0eQ\xad\x06P\xbd\xa8\xbfʁZ\x03(3\xa0R\x87\xda\x05(\x0f\xa0ҁ\xda=(\x13\xa02\x83Z\a(w\xa02\x81Z\x0f(\x13\xb5\x97\xf4P\x1b@\xa5\a\xf4\nP\x1bC\xa5\x02t\x1eP+@e\x1e\xf4\x0ePk@e\x02t:P;A\xa5\x01\xf4\nS\xfb@\xa5\x06t\x06P+C\xe5\x02\xf4\x06Pk@\xe5\x1et\nP\x1bA\xa5\x03\xf4:P\x1b@\xa5\x06t\x0eZJ\xff\\\x85ˈ\u05f8\x0f\xe5+F[\xc8\xe7E)\xed\x14\xa1\xf6\xe2}(v\x8d(N\x83)և(ރ(V\xdaW\xf2\x82t4\xa0m\x05(\xffAJ\x06\xd0҂t\xfc\xa0\xad\x03(oA\xca:\xd02\x82t4\xa0\xed\xb0\xbfҀ\xfa\x05(=\xa0R\x87:\x03(\x15\xa0\xf2\x81z\x03(\xf5\xa0r\x83:\x05(\r\xa1ҁ\xfa\r(\r\xa0R\x9f:\a(5\xa02\x81z\x1f(\x15\xa02\x83:\x05(\xfd\xa0R\x80\xfa\r(\x1d\xa1ҁ:\x03(5\xa0r\xd6g\xf4\xb9\x8b\x97!\xabq\x1f\xcaN\xe0GJ\x1bE)\xfd\x14\xa1\xb6\xf2}(N\x8f(V\x83)އ(փ(N\xdcW҃\xb44\xa0}\x05(\xfbAJ\x19\xd02\x82\xb4\xfc\xa0\xbd\x03(kA\xca9\xd0R\x81\xb44\xa0\xfd\xb4\xbfҀ\xda\x05(3\xa0r\x87Z\x03(\x17\xa0\xf2\x81Z\x03(\xf3\xa0R\x83\xda\x05(\x0f\xa1ҁ\xda\r(\x13\xa02\x9fZ\a(7\xa02\x81Z\x1f(\x13\xa0Ҁ\xda\x05(\xff\xa0R\x80\xda\r "), + MaxTimeMs: 7140000, + 
Data: []byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"), }, + { + Type: prompb.Chunk_XOR, + MinTimeMs: 7200000, + MaxTimeMs: 7200000, + Data: []byte("\000\001\200\364\356\006@\307p\000\000\000\000\000\000"), + }, + }, + }, + }, + }, + { + ChunkedSeries: []*prompb.ChunkedSeries{ + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "test_metric1"}, + {Name: "b", Value: "c"}, + {Name: "baz", Value: "qux"}, + {Name: "d", Value: "e"}, + {Name: "foo", Value: "bar3"}, + }, + Chunks: []prompb.Chunk{ + { + Type: prompb.Chunk_XOR, + MaxTimeMs: 7140000, + Data: []byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"), + }, + { + Type: prompb.Chunk_XOR, + MinTimeMs: 7200000, + MaxTimeMs: 14340000, + Data: 
[]byte("\000x\200\364\356\006@\307p\000\000\000\000\000\340\324\003\340>\224\355\260\277\322\200\372\005(=\240R\207:\003(\025\240\362\201z\003(\365\240r\203:\005(\r\241\322\201\372\r(\r\240R\237:\007(5\2402\201z\037(\025\2402\203:\005(\375\240R\200\372\r(\035\241\322\201:\003(5\240r\326g\364\271\213\227!\253q\037\312N\340GJ\033E)\375\024\241\266\362}(N\217(V\203)\336\207(\326\203(N\334W\322\203\2644\240}\005(\373AJ\031\3202\202\264\374\240\275\003(kA\3129\320R\201\2644\240\375\264\277\322\200\332\005(3\240r\207Z\003(\027\240\362\201Z\003(\363\240R\203\332\005(\017\241\322\201\332\r(\023\2402\237Z\007(7\2402\201Z\037(\023\240\322\200\332\005(\377\240R\200\332\r "), + }, + }, + }, + }, + }, + { + ChunkedSeries: []*prompb.ChunkedSeries{ + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "test_metric1"}, + {Name: "b", Value: "c"}, + {Name: "baz", Value: "qux"}, + {Name: "d", Value: "e"}, + {Name: "foo", Value: "bar3"}, + }, + Chunks: []prompb.Chunk{ { Type: prompb.Chunk_XOR, MinTimeMs: 14400000, MaxTimeMs: 14400000, - Data: []byte("\x00\x01\x80\xe8\xdd\r@\xd7p\x00\x00\x00\x00\x00\x00"), - }, - }, - }, - }, - }, - { - ChunkedSeries: []*prompb.ChunkedSeries{ - { - Labels: []prompb.Label{ - {Name: "__name__", Value: "test_metric1"}, - {Name: "b", Value: "c"}, - {Name: "baz", Value: "qux"}, - {Name: "d", Value: "e"}, - {Name: "foo", Value: "bar3"}, - }, - Chunks: []prompb.Chunk{ - { - Type: prompb.Chunk_XOR, - MaxTimeMs: 14340000, - Data: 
[]byte("\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xd4\x03\xc2|\x05\x94\x00\xc1\xac}\xe9z2\xd0O\xed\xb4n[\aΔ\xa3md\xf9\xd0\xfd\x1aPm\nS\x9d\x0eQ\xad\x06P\xbd\xa8\xbfʁZ\x03(3\xa0R\x87\xda\x05(\x0f\xa0ҁ\xda=(\x13\xa02\x83Z\a(w\xa02\x81Z\x0f(\x13\xb5\x97\xf4P\x1b@\xa5\a\xf4\nP\x1bC\xa5\x02t\x1eP+@e\x1e\xf4\x0ePk@e\x02t:P;A\xa5\x01\xf4\nS\xfb@\xa5\x06t\x06P+C\xe5\x02\xf4\x06Pk@\xe5\x1et\nP\x1bA\xa5\x03\xf4:P\x1b@\xa5\x06t\x0eZJ\xff\\\x85ˈ\u05f8\x0f\xe5+F[\xc8\xe7E)\xed\x14\xa1\xf6\xe2}(v\x8d(N\x83)և(ރ(V\xdaW\xf2\x82t4\xa0m\x05(\xffAJ\x06\xd0҂t\xfc\xa0\xad\x03(oA\xca:\xd02\x82t4\xa0\xed\xb0\xbfҀ\xfa\x05(=\xa0R\x87:\x03(\x15\xa0\xf2\x81z\x03(\xf5\xa0r\x83:\x05(\r\xa1ҁ\xfa\r(\r\xa0R\x9f:\a(5\xa02\x81z\x1f(\x15\xa02\x83:\x05(\xfd\xa0R\x80\xfa\r(\x1d\xa1ҁ:\x03(5\xa0r\xd6g\xf4\xb9\x8b\x97!\xabq\x1f\xcaN\xe0GJ\x1bE)\xfd\x14\xa1\xb6\xf2}(N\x8f(V\x83)އ(փ(N\xdcW҃\xb44\xa0}\x05(\xfbAJ\x19\xd02\x82\xb4\xfc\xa0\xbd\x03(kA\xca9\xd0R\x81\xb44\xa0\xfd\xb4\xbfҀ\xda\x05(3\xa0r\x87Z\x03(\x17\xa0\xf2\x81Z\x03(\xf3\xa0R\x83\xda\x05(\x0f\xa1ҁ\xda\r(\x13\xa02\x9fZ\a(7\xa02\x81Z\x1f(\x13\xa0Ҁ\xda\x05(\xff\xa0R\x80\xda\r "), - }, - { - Type: prompb.Chunk_XOR, - MinTimeMs: 14400000, - MaxTimeMs: 28740000, - Data: 
[]byte("\x00\xf0\x80\xe8\xdd\r@\xd7p\x00\x00\x00\x00\x00\xe0\xd4\x03\xe0G\xca+C)\xbd\x1c\xb6\x19\xfdh\x06P\x13\xa0i@v\x83\xa5\x00\xfa\x02\x94\x0fh\nP\xf3\xa0\x19@V\x81\xe5\x01z\x01\x94\x1dh\x0eP3\xa0)@6\x8f\xa5\x01\xfa\x06\x94\x03h\nPs\xa09@րe\x01z\x1f\x94\x05h\x06P3\xa0)A\xf6\x80\xa5\x00\xfa\x06\x94\ai\xfaP\x13\xa0\x19@ր\xe5\az\x01\x94\x05h\x1eP\x13\xa1\xe9@6\x80\xa5\x03\xfa\x02\x94\x03h:P\x13\xa0y@V\x80e\x1fz\x03\x94\rh\x06P\x13\xa0\xe9@v\x81\xa5\x00\xfa\x02\x94?h\nP3\xa0\x19@V\x83\xe5\x01z\x01\x94\rh\x0eZ\x8e\xff\xad\xccjSnC\xe9O\xdcH\xe9Ch\xa53\xa3\x97\x02}h2\x85\xe8\xf2\x85h2\x9c\xe8R\x8fhR\x83\xed\xe5}(;CJ\t\xd02\x8e\xb4\x1c\xa1\xbd\x03(+O\xca\t\xd0ҁ\xb4\x14\xa3\xfd\x05(\x1bCJ\tۋ\xff(\x15\xa02\x83z\a(u\xa02\x81:\r(\x1d\xa3Ҁ\xfa\x05(=\xa0R\x87:\x03(\x15\xa0\xf2\x81z\x03)\xf5\xa0r\x83:\x05(\r\xa1ҁ\xfa\r(\r\xa0R\x8f:\a(5\xa02\x81z\x1f(\x15\xa02\x83:\x05-\xa6\x7f\xda\x02\x94\x03\xe8\x1aP\x1d\xa0\xe9@N\x80e\x03Z\x03\x94=\xe8\x06P\x15\xa0y@N\x83\xa5\x00\xda\x02\x94\x0f\xe8\nP\r\xa3\xe9@N\x81\xe5\x01Z\x01\x94\x1d\xe8\x0eP5\xa0\x19@N\x87\xa5\x01\xda\x06\x94\x03\xe8\nP}\xa0)@\u0380e\x01Z\x7f\x94\x05\xe8\x06P5\xa09A\u0380\xa5\x00\xda\x06\x94\a\xe8zP\r\xa0)@\u0380\xe5\aZ\x01\x94\x05\xe8\x1eP\x15\xa0\x19G\u0380\xa5\x03\xda\x02\x94\x03\xe8:P\x1d\xa0i"), - }, - }, - }, - }, - }, - { - ChunkedSeries: []*prompb.ChunkedSeries{ - { - Labels: []prompb.Label{ - {Name: "__name__", Value: "test_metric1"}, - {Name: "b", Value: "c"}, - {Name: "baz", Value: "qux"}, - {Name: "d", Value: "e"}, - {Name: "foo", Value: "bar3"}, - }, - Chunks: []prompb.Chunk{ - { - Type: prompb.Chunk_XOR, - MinTimeMs: 28800000, - MaxTimeMs: 28800000, - Data: []byte("\x00\x01\x80л\x1b@\xe7p\x00\x00\x00\x00\x00\x00"), + Data: []byte("\000\001\200\350\335\r@\327p\000\000\000\000\000\000"), }, }, }, @@ -412,8 +409,8 @@ func TestStreamReadEndpoint(t *testing.T) { Chunks: []prompb.Chunk{ { Type: prompb.Chunk_XOR, - MaxTimeMs: 14340000, - Data: 
[]byte("\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xd4\x03\xc2|\x05\x94\x00\xc1\xac}\xe9z2\xd0O\xed\xb4n[\aΔ\xa3md\xf9\xd0\xfd\x1aPm\nS\x9d\x0eQ\xad\x06P\xbd\xa8\xbfʁZ\x03(3\xa0R\x87\xda\x05(\x0f\xa0ҁ\xda=(\x13\xa02\x83Z\a(w\xa02\x81Z\x0f(\x13\xb5\x97\xf4P\x1b@\xa5\a\xf4\nP\x1bC\xa5\x02t\x1eP+@e\x1e\xf4\x0ePk@e\x02t:P;A\xa5\x01\xf4\nS\xfb@\xa5\x06t\x06P+C\xe5\x02\xf4\x06Pk@\xe5\x1et\nP\x1bA\xa5\x03\xf4:P\x1b@\xa5\x06t\x0eZJ\xff\\\x85ˈ\u05f8\x0f\xe5+F[\xc8\xe7E)\xed\x14\xa1\xf6\xe2}(v\x8d(N\x83)և(ރ(V\xdaW\xf2\x82t4\xa0m\x05(\xffAJ\x06\xd0҂t\xfc\xa0\xad\x03(oA\xca:\xd02\x82t4\xa0\xed\xb0\xbfҀ\xfa\x05(=\xa0R\x87:\x03(\x15\xa0\xf2\x81z\x03(\xf5\xa0r\x83:\x05(\r\xa1ҁ\xfa\r(\r\xa0R\x9f:\a(5\xa02\x81z\x1f(\x15\xa02\x83:\x05(\xfd\xa0R\x80\xfa\r(\x1d\xa1ҁ:\x03(5\xa0r\xd6g\xf4\xb9\x8b\x97!\xabq\x1f\xcaN\xe0GJ\x1bE)\xfd\x14\xa1\xb6\xf2}(N\x8f(V\x83)އ(փ(N\xdcW҃\xb44\xa0}\x05(\xfbAJ\x19\xd02\x82\xb4\xfc\xa0\xbd\x03(kA\xca9\xd0R\x81\xb44\xa0\xfd\xb4\xbfҀ\xda\x05(3\xa0r\x87Z\x03(\x17\xa0\xf2\x81Z\x03(\xf3\xa0R\x83\xda\x05(\x0f\xa1ҁ\xda\r(\x13\xa02\x9fZ\a(7\xa02\x81Z\x1f(\x13\xa0Ҁ\xda\x05(\xff\xa0R\x80\xda\r "), + MaxTimeMs: 7140000, + Data: []byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"), }, }, }, diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 14297c3dc..70639085e 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -247,8 +247,8 @@ func TestNoPanicAfterWALCorruption(t *testing.T) { var 
maxt int64 ctx := context.Background() { - // Appending 221 samples because on the 221st a new chunk will be created. - for i := 0; i < 221; i++ { + // Appending 121 samples because on the 121st a new chunk will be created. + for i := 0; i < 121; i++ { app := db.Appender(ctx) _, err := app.Append(0, labels.FromStrings("foo", "bar"), maxt, 0) expSamples = append(expSamples, sample{t: maxt, v: 0}) @@ -1089,9 +1089,9 @@ func TestWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T) { numSamplesBeforeSeriesCreation = 1000 ) - // We test both with few and many samples appended after series creation. If samples are < 220 then there's no + // We test both with few and many samples appended after series creation. If samples are < 120 then there's no // mmap-ed chunk, otherwise there's at least 1 mmap-ed chunk when replaying the WAL. - for _, numSamplesAfterSeriesCreation := range []int{1, 2000} { + for _, numSamplesAfterSeriesCreation := range []int{1, 1000} { for run := 1; run <= numRuns; run++ { t.Run(fmt.Sprintf("samples after series creation = %d, run = %d", numSamplesAfterSeriesCreation, run), func(t *testing.T) { testWALReplayRaceOnSamplesLoggedBeforeSeries(t, numSamplesBeforeSeriesCreation, numSamplesAfterSeriesCreation) @@ -1160,8 +1160,8 @@ func testWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T, numSamplesBefore } require.NoError(t, chunksIt.Err()) - // We expect 1 chunk every 220 samples after series creation. - require.Equalf(t, (numSamplesAfterSeriesCreation/220)+1, actualChunks, "series: %s", set.At().Labels().String()) + // We expect 1 chunk every 120 samples after series creation. 
+ require.Equalf(t, (numSamplesAfterSeriesCreation/120)+1, actualChunks, "series: %s", set.At().Labels().String()) } require.NoError(t, set.Err()) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index df48e592d..80b71e927 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -808,7 +808,7 @@ func TestMemSeries_truncateChunks(t *testing.T) { s := newMemSeries(labels.FromStrings("a", "b"), 1, defaultIsolationDisabled, DefaultSamplesPerChunk) - for i := 0; i < 8000; i += 5 { + for i := 0; i < 4000; i += 5 { ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange) require.True(t, ok, "sample append failed") } @@ -825,9 +825,9 @@ func TestMemSeries_truncateChunks(t *testing.T) { require.NotNil(t, chk) require.NoError(t, err) - s.truncateChunksBefore(4000, 0) + s.truncateChunksBefore(2000, 0) - require.Equal(t, int64(4000), s.mmappedChunks[0].minTime) + require.Equal(t, int64(2000), s.mmappedChunks[0].minTime) _, _, err = s.chunk(0, chunkDiskMapper, &memChunkPool) require.Equal(t, storage.ErrNotFound, err, "first chunks not gone") require.Equal(t, countBefore/2, len(s.mmappedChunks)+1) // +1 for the head chunk. @@ -1364,9 +1364,9 @@ func TestMemSeries_append(t *testing.T) { require.Equal(t, int64(1000), s.headChunk.minTime, "wrong chunk range") require.Equal(t, int64(1001), s.headChunk.maxTime, "wrong chunk range") - // Fill the range [1000,3000) with many samples. Intermediate chunks should be cut - // at approximately 220 samples per chunk. - for i := 1; i < 2000; i++ { + // Fill the range [1000,2000) with many samples. Intermediate chunks should be cut + // at approximately 120 samples per chunk. 
+ for i := 1; i < 1000; i++ { ok, _ := s.append(1001+int64(i), float64(i), 0, chunkDiskMapper, chunkRange) require.True(t, ok, "append failed") } @@ -1437,7 +1437,7 @@ func TestMemSeries_appendHistogram(t *testing.T) { } func TestMemSeries_append_atVariableRate(t *testing.T) { - const samplesPerChunk = 220 + const samplesPerChunk = 120 dir := t.TempDir() // This is usually taken from the Head, but passing manually here. chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize) @@ -2983,7 +2983,7 @@ func TestAppendHistogram(t *testing.T) { } func TestHistogramInWALAndMmapChunk(t *testing.T) { - head, _ := newTestHead(t, 6000, false, false) + head, _ := newTestHead(t, 3000, false, false) t.Cleanup(func() { require.NoError(t, head.Close()) }) @@ -2992,7 +2992,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { // Series with only histograms. s1 := labels.FromStrings("a", "b1") k1 := s1.String() - numHistograms := 600 + numHistograms := 300 exp := map[string][]tsdbutil.Sample{} ts := int64(0) var app storage.Appender @@ -3728,7 +3728,7 @@ func TestHistogramCounterResetHeader(t *testing.T) { checkExpCounterResetHeader(chunkenc.CounterReset) // Add 2 non-counter reset histograms. - for i := 0; i < 500; i++ { + for i := 0; i < 250; i++ { appendHistogram(h) } checkExpCounterResetHeader(chunkenc.NotCounterReset, chunkenc.NotCounterReset) @@ -3756,7 +3756,7 @@ func TestHistogramCounterResetHeader(t *testing.T) { checkExpCounterResetHeader(chunkenc.CounterReset) // Add 2 non-counter reset histograms. Just to have some non-counter reset chunks in between. 
- for i := 0; i < 500; i++ { + for i := 0; i < 250; i++ { appendHistogram(h) } checkExpCounterResetHeader(chunkenc.NotCounterReset, chunkenc.NotCounterReset) @@ -4223,7 +4223,7 @@ func TestHeadInit_DiscardChunksWithUnsupportedEncoding(t *testing.T) { h.chunkDiskMapper.WriteChunk(chunks.HeadSeriesRef(seriesRef), 500, 600, uc, false, func(err error) { require.NoError(t, err) }) app = h.Appender(ctx) - for i := 700; i < 1700; i++ { + for i := 700; i < 1200; i++ { _, err := app.Append(0, seriesLabels, int64(i), float64(i)) require.NoError(t, err) } From 79e4bdee8e0ea7219357bdefdbab62446a091855 Mon Sep 17 00:00:00 2001 From: ianwoolf Date: Sun, 27 Feb 2022 20:49:33 +0800 Subject: [PATCH 077/632] add Close for ActiveQueryTracker to close the file. Signed-off-by: ianwoolf --- promql/engine_test.go | 1 + promql/query_logger.go | 22 ++++++++++++++++------ promql/query_logger_test.go | 5 ++++- 3 files changed, 21 insertions(+), 7 deletions(-) diff --git a/promql/engine_test.go b/promql/engine_test.go index b64e32ba4..237ff7239 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -49,6 +49,7 @@ func TestQueryConcurrency(t *testing.T) { require.NoError(t, err) defer os.RemoveAll(dir) queryTracker := NewActiveQueryTracker(dir, maxConcurrency, nil) + t.Cleanup(queryTracker.Close) opts := EngineOpts{ Logger: nil, diff --git a/promql/query_logger.go b/promql/query_logger.go index 716e7749b..fa4e1fb07 100644 --- a/promql/query_logger.go +++ b/promql/query_logger.go @@ -16,6 +16,7 @@ package promql import ( "context" "encoding/json" + "io" "os" "path/filepath" "strings" @@ -31,6 +32,7 @@ type ActiveQueryTracker struct { mmapedFile []byte getNextIndex chan int logger log.Logger + closer io.Closer maxConcurrent int } @@ -81,7 +83,7 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { } } -func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, error) { +func getMMapedFile(filename string, filesize int, logger log.Logger) 
([]byte, io.Closer, error) { file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666) if err != nil { absPath, pathErr := filepath.Abs(filename) @@ -89,22 +91,22 @@ func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, er absPath = filename } level.Error(logger).Log("msg", "Error opening query log file", "file", absPath, "err", err) - return nil, err + return nil, nil, err } err = file.Truncate(int64(filesize)) if err != nil { level.Error(logger).Log("msg", "Error setting filesize.", "filesize", filesize, "err", err) - return nil, err + return nil, nil, err } fileAsBytes, err := mmap.Map(file, mmap.RDWR, 0) if err != nil { level.Error(logger).Log("msg", "Failed to mmap", "file", filename, "Attempted size", filesize, "err", err) - return nil, err + return nil, nil, err } - return fileAsBytes, err + return fileAsBytes, file, err } func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker { @@ -116,7 +118,7 @@ func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger lo filename, filesize := filepath.Join(localStoragePath, "queries.active"), 1+maxConcurrent*entrySize logUnfinishedQueries(filename, filesize, logger) - fileAsBytes, err := getMMapedFile(filename, filesize, logger) + fileAsBytes, closer, err := getMMapedFile(filename, filesize, logger) if err != nil { panic("Unable to create mmap-ed active query log") } @@ -124,6 +126,7 @@ func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger lo copy(fileAsBytes, "[") activeQueryTracker := ActiveQueryTracker{ mmapedFile: fileAsBytes, + closer: closer, getNextIndex: make(chan int, maxConcurrent), logger: logger, maxConcurrent: maxConcurrent, @@ -198,3 +201,10 @@ func (tracker ActiveQueryTracker) Insert(ctx context.Context, query string) (int return 0, ctx.Err() } } + +func (tracker *ActiveQueryTracker) Close() { + if tracker == nil || tracker.closer == nil { + return + } + 
tracker.closer.Close() +} diff --git a/promql/query_logger_test.go b/promql/query_logger_test.go index ad76fb992..ce55fecbb 100644 --- a/promql/query_logger_test.go +++ b/promql/query_logger_test.go @@ -110,7 +110,10 @@ func TestMMapFile(t *testing.T) { filename := file.Name() defer os.Remove(filename) - fileAsBytes, err := getMMapedFile(filename, 2, nil) + fileAsBytes, closer, err := getMMapedFile(filename, 2, nil) + if err != nil { + t.Cleanup(func() { closer.Close() }) + } require.NoError(t, err) copy(fileAsBytes, "ab") From 5d4ec08a1fd65ff84ea16bca22ad3b20b91604f6 Mon Sep 17 00:00:00 2001 From: Sebastian Rabenhorst Date: Fri, 14 Apr 2023 11:59:30 +0200 Subject: [PATCH 078/632] Fixed sampleRingIterator for mixed histograms Signed-off-by: Sebastian Rabenhorst Fixed sampleRingIterator for mixed histograms Signed-off-by: Sebastian Rabenhorst Fixed lint --- storage/buffer.go | 2 ++ storage/buffer_test.go | 23 +++++++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/storage/buffer.go b/storage/buffer.go index 27ac21661..2229e5259 100644 --- a/storage/buffer.go +++ b/storage/buffer.go @@ -332,9 +332,11 @@ func (it *sampleRingIterator) Next() chunkenc.ValueType { switch s.Type() { case chunkenc.ValHistogram: it.h = s.H() + it.fh = nil return chunkenc.ValHistogram case chunkenc.ValFloatHistogram: it.fh = s.FH() + it.h = nil return chunkenc.ValFloatHistogram default: it.f = s.F() diff --git a/storage/buffer_test.go b/storage/buffer_test.go index ebe24d8df..70cd7f4ff 100644 --- a/storage/buffer_test.go +++ b/storage/buffer_test.go @@ -21,6 +21,7 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/tsdbutil" ) func TestSampleRing(t *testing.T) { @@ -180,6 +181,28 @@ func TestBufferedSeriesIteratorNoBadAt(t *testing.T) { it.Next() } +func TestBufferedSeriesIteratorMixedHistograms(t *testing.T) { + histograms := tsdbutil.GenerateTestHistograms(2) + + it := 
NewBufferIterator(NewListSeriesIterator(samples{ + fhSample{t: 1, fh: histograms[0].ToFloat()}, + hSample{t: 2, h: histograms[1]}, + }), 2) + + require.Equal(t, chunkenc.ValNone, it.Seek(3)) + require.NoError(t, it.Err()) + + buf := it.Buffer() + + require.Equal(t, chunkenc.ValFloatHistogram, buf.Next()) + _, fh := buf.AtFloatHistogram() + require.Equal(t, histograms[0].ToFloat(), fh) + + require.Equal(t, chunkenc.ValHistogram, buf.Next()) + _, fh = buf.AtFloatHistogram() + require.Equal(t, histograms[1].ToFloat(), fh) +} + func BenchmarkBufferedSeriesIterator(b *testing.B) { // Simulate a 5 minute rate. it := NewBufferIterator(newFakeSeriesIterator(int64(b.N), 30), 5*60) From 1801cd41966be6c9566bb0104cfa7bafc2a6a7fa Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 14 Apr 2023 14:37:35 +0000 Subject: [PATCH 079/632] labels: small optimization to stringlabels Add a fast path for the common case that a string is less than 127 bytes long, to skip a shift and the loop. Signed-off-by: Bryan Boreham --- model/labels/labels_string.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/model/labels/labels_string.go b/model/labels/labels_string.go index f293c9167..ff46103eb 100644 --- a/model/labels/labels_string.go +++ b/model/labels/labels_string.go @@ -56,8 +56,14 @@ func (ls labelSlice) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] } func (ls labelSlice) Less(i, j int) bool { return ls[i].Name < ls[j].Name } func decodeSize(data string, index int) (int, int) { - var size int - for shift := uint(0); ; shift += 7 { + // Fast-path for common case of a single byte, value 0..127. + b := data[index] + index++ + if b < 0x80 { + return int(b), index + } + size := int(b & 0x7F) + for shift := uint(7); ; shift += 7 { // Just panic if we go of the end of data, since all Labels strings are constructed internally and // malformed data indicates a bug, or memory corruption. 
b := data[index] From fd3630b9a3c7accecbf7fd438706c43612cfc568 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Mon, 17 Apr 2023 21:32:38 -0700 Subject: [PATCH 080/632] add ctx to QueryEngine interface Signed-off-by: Ben Ye --- cmd/promtool/unittest.go | 2 +- promql/bench_test.go | 5 ++-- promql/engine.go | 4 ++-- promql/engine_test.go | 52 +++++++++++++++++++++------------------- promql/functions_test.go | 5 ++-- promql/promql_test.go | 5 ++-- promql/test.go | 4 ++-- rules/manager.go | 2 +- web/api/v1/api.go | 8 +++---- 9 files changed, 46 insertions(+), 41 deletions(-) diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index b3e6f67f9..b9d8e0527 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -434,7 +434,7 @@ func (tg *testGroup) maxEvalTime() time.Duration { } func query(ctx context.Context, qs string, t time.Time, engine *promql.Engine, qu storage.Queryable) (promql.Vector, error) { - q, err := engine.NewInstantQuery(qu, nil, qs, t) + q, err := engine.NewInstantQuery(ctx, qu, nil, qs, t) if err != nil { return nil, err } diff --git a/promql/bench_test.go b/promql/bench_test.go index 88025d932..3d3588447 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -240,16 +240,17 @@ func BenchmarkRangeQuery(b *testing.B) { for _, c := range cases { name := fmt.Sprintf("expr=%s,steps=%d", c.expr, c.steps) b.Run(name, func(b *testing.B) { + ctx := context.Background() b.ReportAllocs() for i := 0; i < b.N; i++ { qry, err := engine.NewRangeQuery( - stor, nil, c.expr, + ctx, stor, nil, c.expr, time.Unix(int64((numIntervals-c.steps)*10), 0), time.Unix(int64(numIntervals*10), 0), time.Second*10) if err != nil { b.Fatal(err) } - res := qry.Exec(context.Background()) + res := qry.Exec(ctx) if res.Err != nil { b.Fatal(res.Err) } diff --git a/promql/engine.go b/promql/engine.go index b49be244f..fae4f0932 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -400,7 +400,7 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) { } // 
NewInstantQuery returns an evaluation query for the given expression at the given time. -func (ng *Engine) NewInstantQuery(q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) { +func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) { expr, err := parser.ParseExpr(qs) if err != nil { return nil, err @@ -416,7 +416,7 @@ func (ng *Engine) NewInstantQuery(q storage.Queryable, opts *QueryOpts, qs strin // NewRangeQuery returns an evaluation query for the given time range and with // the resolution set by the interval. -func (ng *Engine) NewRangeQuery(q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { +func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { expr, err := parser.ParseExpr(qs) if err != nil { return nil, err diff --git a/promql/engine_test.go b/promql/engine_test.go index b64e32ba4..2941a2240 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -231,14 +231,14 @@ func TestQueryError(t *testing.T) { ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() - vectorQuery, err := engine.NewInstantQuery(queryable, nil, "foo", time.Unix(1, 0)) + vectorQuery, err := engine.NewInstantQuery(ctx, queryable, nil, "foo", time.Unix(1, 0)) require.NoError(t, err) res := vectorQuery.Exec(ctx) require.Error(t, res.Err, "expected error on failed select but got none") require.True(t, errors.Is(res.Err, errStorage), "expected error doesn't match") - matrixQuery, err := engine.NewInstantQuery(queryable, nil, "foo[1m]", time.Unix(1, 0)) + matrixQuery, err := engine.NewInstantQuery(ctx, queryable, nil, "foo[1m]", time.Unix(1, 0)) require.NoError(t, err) res = matrixQuery.Exec(ctx) @@ -564,14 +564,15 @@ func TestSelectHintsSetCorrectly(t *testing.T) { query Query err error ) + 
ctx := context.Background() if tc.end == 0 { - query, err = engine.NewInstantQuery(hintsRecorder, nil, tc.query, timestamp.Time(tc.start)) + query, err = engine.NewInstantQuery(ctx, hintsRecorder, nil, tc.query, timestamp.Time(tc.start)) } else { - query, err = engine.NewRangeQuery(hintsRecorder, nil, tc.query, timestamp.Time(tc.start), timestamp.Time(tc.end), time.Second) + query, err = engine.NewRangeQuery(ctx, hintsRecorder, nil, tc.query, timestamp.Time(tc.start), timestamp.Time(tc.end), time.Second) } require.NoError(t, err) - res := query.Exec(context.Background()) + res := query.Exec(ctx) require.NoError(t, res.Err) require.Equal(t, tc.expected, hintsRecorder.hints) @@ -727,9 +728,9 @@ load 10s var err error var qry Query if c.Interval == 0 { - qry, err = test.QueryEngine().NewInstantQuery(test.Queryable(), nil, c.Query, c.Start) + qry, err = test.QueryEngine().NewInstantQuery(test.context, test.Queryable(), nil, c.Query, c.Start) } else { - qry, err = test.QueryEngine().NewRangeQuery(test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval) + qry, err = test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval) } require.NoError(t, err) @@ -1204,9 +1205,9 @@ load 10s var err error var qry Query if c.Interval == 0 { - qry, err = engine.NewInstantQuery(test.Queryable(), opts, c.Query, c.Start) + qry, err = engine.NewInstantQuery(test.context, test.Queryable(), opts, c.Query, c.Start) } else { - qry, err = engine.NewRangeQuery(test.Queryable(), opts, c.Query, c.Start, c.End, c.Interval) + qry, err = engine.NewRangeQuery(test.context, test.Queryable(), opts, c.Query, c.Start, c.End, c.Interval) } require.NoError(t, err) @@ -1387,9 +1388,9 @@ load 10s var err error var qry Query if c.Interval == 0 { - qry, err = engine.NewInstantQuery(test.Queryable(), nil, c.Query, c.Start) + qry, err = engine.NewInstantQuery(test.context, test.Queryable(), nil, c.Query, c.Start) } else { - qry, err = 
engine.NewRangeQuery(test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval) + qry, err = engine.NewRangeQuery(test.context, test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval) } require.NoError(t, err) @@ -1628,9 +1629,9 @@ load 1ms var err error var qry Query if c.end == 0 { - qry, err = test.QueryEngine().NewInstantQuery(test.Queryable(), nil, c.query, start) + qry, err = test.QueryEngine().NewInstantQuery(test.context, test.Queryable(), nil, c.query, start) } else { - qry, err = test.QueryEngine().NewRangeQuery(test.Queryable(), nil, c.query, start, end, interval) + qry, err = test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, c.query, start, end, interval) } require.NoError(t, err) @@ -1961,7 +1962,7 @@ func TestSubquerySelector(t *testing.T) { engine := test.QueryEngine() for _, c := range tst.cases { t.Run(c.Query, func(t *testing.T) { - qry, err := engine.NewInstantQuery(test.Queryable(), nil, c.Query, c.Start) + qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, c.Query, c.Start) require.NoError(t, err) res := qry.Exec(test.Context()) @@ -2909,6 +2910,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { } func TestEngineOptsValidation(t *testing.T) { + ctx := context.Background() cases := []struct { opts EngineOpts query string @@ -2968,8 +2970,8 @@ func TestEngineOptsValidation(t *testing.T) { for _, c := range cases { eng := NewEngine(c.opts) - _, err1 := eng.NewInstantQuery(nil, nil, c.query, time.Unix(10, 0)) - _, err2 := eng.NewRangeQuery(nil, nil, c.query, time.Unix(0, 0), time.Unix(10, 0), time.Second) + _, err1 := eng.NewInstantQuery(ctx, nil, nil, c.query, time.Unix(10, 0)) + _, err2 := eng.NewRangeQuery(ctx, nil, nil, c.query, time.Unix(0, 0), time.Unix(10, 0), time.Second) if c.fail { require.Equal(t, c.expError, err1) require.Equal(t, c.expError, err2) @@ -3116,7 +3118,7 @@ func TestRangeQuery(t *testing.T) { err = test.Run() require.NoError(t, err) - qry, err := 
test.QueryEngine().NewRangeQuery(test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval) + qry, err := test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval) require.NoError(t, err) res := qry.Exec(test.Context()) @@ -3147,7 +3149,7 @@ func TestNativeHistogramRate(t *testing.T) { engine := test.QueryEngine() queryString := fmt.Sprintf("rate(%s[1m])", seriesName) - qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond))) + qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond))) require.NoError(t, err) res := qry.Exec(test.Context()) require.NoError(t, res.Err) @@ -3191,7 +3193,7 @@ func TestNativeFloatHistogramRate(t *testing.T) { engine := test.QueryEngine() queryString := fmt.Sprintf("rate(%s[1m])", seriesName) - qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond))) + qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond))) require.NoError(t, err) res := qry.Exec(test.Context()) require.NoError(t, res.Err) @@ -3255,7 +3257,7 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { require.NoError(t, app.Commit()) queryString := fmt.Sprintf("histogram_count(%s)", seriesName) - qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts)) + qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) require.NoError(t, err) res := qry.Exec(test.Context()) @@ -3273,7 +3275,7 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { } queryString = fmt.Sprintf("histogram_sum(%s)", seriesName) - qry, err = engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts)) + qry, err = 
engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) require.NoError(t, err) res = qry.Exec(test.Context()) @@ -3509,7 +3511,7 @@ func TestNativeHistogram_HistogramQuantile(t *testing.T) { for j, sc := range c.subCases { t.Run(fmt.Sprintf("%d %s", j, sc.quantile), func(t *testing.T) { queryString := fmt.Sprintf("histogram_quantile(%s, %s)", sc.quantile, seriesName) - qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts)) + qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) require.NoError(t, err) res := qry.Exec(test.Context()) @@ -3940,7 +3942,7 @@ func TestNativeHistogram_HistogramFraction(t *testing.T) { for j, sc := range c.subCases { t.Run(fmt.Sprintf("%d %s %s", j, sc.lower, sc.upper), func(t *testing.T) { queryString := fmt.Sprintf("histogram_fraction(%s, %s, %s)", sc.lower, sc.upper, seriesName) - qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts)) + qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) require.NoError(t, err) res := qry.Exec(test.Context()) @@ -4077,7 +4079,7 @@ func TestNativeHistogram_Sum_Count_AddOperator(t *testing.T) { require.NoError(t, app.Commit()) queryAndCheck := func(queryString string, exp Vector) { - qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts)) + qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) require.NoError(t, err) res := qry.Exec(test.Context()) @@ -4186,7 +4188,7 @@ metric 0 1 2 opts := &QueryOpts{ LookbackDelta: c.queryLookback, } - qry, err := eng.NewInstantQuery(test.Queryable(), opts, query, c.ts) + qry, err := eng.NewInstantQuery(test.context, test.Queryable(), opts, query, c.ts) require.NoError(t, err) res := qry.Exec(test.Context()) diff --git a/promql/functions_test.go b/promql/functions_test.go 
index e552424b3..8df5684b8 100644 --- a/promql/functions_test.go +++ b/promql/functions_test.go @@ -56,10 +56,11 @@ func TestDeriv(t *testing.T) { require.NoError(t, a.Commit()) - query, err := engine.NewInstantQuery(storage, nil, "deriv(foo[30m])", timestamp.Time(1493712846939)) + ctx := context.Background() + query, err := engine.NewInstantQuery(ctx, storage, nil, "deriv(foo[30m])", timestamp.Time(1493712846939)) require.NoError(t, err) - result := query.Exec(context.Background()) + result := query.Exec(ctx) require.NoError(t, result.Err) vec, _ := result.Vector() diff --git a/promql/promql_test.go b/promql/promql_test.go index 68bfa1d95..a07a0f5cb 100644 --- a/promql/promql_test.go +++ b/promql/promql_test.go @@ -81,14 +81,15 @@ func TestConcurrentRangeQueries(t *testing.T) { defer func() { sem <- struct{}{} }() + ctx := context.Background() qry, err := engine.NewRangeQuery( - stor, nil, c.expr, + ctx, stor, nil, c.expr, time.Unix(int64((numIntervals-c.steps)*10), 0), time.Unix(int64(numIntervals*10), 0), time.Second*10) if err != nil { return err } - res := qry.Exec(context.Background()) + res := qry.Exec(ctx) if res.Err != nil { t.Logf("Query: %q, steps: %d, result: %s", c.expr, c.steps, res.Err) return res.Err diff --git a/promql/test.go b/promql/test.go index 0b3958393..64fa66d5d 100644 --- a/promql/test.go +++ b/promql/test.go @@ -538,7 +538,7 @@ func (t *Test) exec(tc testCommand) error { } queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.start}}, queries...) for _, iq := range queries { - q, err := t.QueryEngine().NewInstantQuery(t.storage, nil, iq.expr, iq.evalTime) + q, err := t.QueryEngine().NewInstantQuery(t.context, t.storage, nil, iq.expr, iq.evalTime) if err != nil { return err } @@ -560,7 +560,7 @@ func (t *Test) exec(tc testCommand) error { // Check query returns same result in range mode, // by checking against the middle step. 
- q, err = t.queryEngine.NewRangeQuery(t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute) + q, err = t.queryEngine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute) if err != nil { return err } diff --git a/rules/manager.go b/rules/manager.go index 07d50be1b..b6513e82d 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -189,7 +189,7 @@ type QueryFunc func(ctx context.Context, q string, t time.Time) (promql.Vector, // It converts scalar into vector results. func EngineQueryFunc(engine *promql.Engine, q storage.Queryable) QueryFunc { return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { - q, err := engine.NewInstantQuery(q, nil, qs, t) + q, err := engine.NewInstantQuery(ctx, q, nil, qs, t) if err != nil { return nil, err } diff --git a/web/api/v1/api.go b/web/api/v1/api.go index aeea87ca7..dde814eb0 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -177,8 +177,8 @@ type TSDBAdminStats interface { // QueryEngine defines the interface for the *promql.Engine, so it can be replaced, wrapped or mocked. 
type QueryEngine interface { SetQueryLogger(l promql.QueryLogger) - NewInstantQuery(q storage.Queryable, opts *promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) - NewRangeQuery(q storage.Queryable, opts *promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) + NewInstantQuery(ctx context.Context, q storage.Queryable, opts *promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) + NewRangeQuery(ctx context.Context, q storage.Queryable, opts *promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) } // API can register a set of endpoints in a router and handle @@ -417,7 +417,7 @@ func (api *API) query(r *http.Request) (result apiFuncResult) { if err != nil { return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} } - qry, err := api.QueryEngine.NewInstantQuery(api.Queryable, opts, r.FormValue("query"), ts) + qry, err := api.QueryEngine.NewInstantQuery(ctx, api.Queryable, opts, r.FormValue("query"), ts) if err != nil { return invalidParamError(err, "query") } @@ -520,7 +520,7 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) { if err != nil { return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} } - qry, err := api.QueryEngine.NewRangeQuery(api.Queryable, opts, r.FormValue("query"), start, end, step) + qry, err := api.QueryEngine.NewRangeQuery(ctx, api.Queryable, opts, r.FormValue("query"), start, end, step) if err != nil { return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} } From f3394bf7a1f362b88d89cb0784be47b75d8fa152 Mon Sep 17 00:00:00 2001 From: gotjosh Date: Tue, 18 Apr 2023 10:07:32 +0100 Subject: [PATCH 081/632] Rules API: Allow filtering by rule name Introduces support for a new query parameter in the `/rules` API endpoint that allows filtering by rule names. If all the rules of a group are filtered, we skip the group entirely. 
Signed-off-by: gotjosh --- docs/querying/api.md | 2 ++ web/api/v1/api.go | 33 +++++++++++++++++++++++++++------ web/api/v1/api_test.go | 27 +++++++++++++++++++++++++++ 3 files changed, 56 insertions(+), 6 deletions(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index f2182a205..0cc549b65 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -673,7 +673,9 @@ GET /api/v1/rules ``` URL query parameters: + - `type=alert|record`: return only the alerting rules (e.g. `type=alert`) or the recording rules (e.g. `type=record`). When the parameter is absent or empty, no filtering is done. +- `rules=alertName,RuleName`: return only the alerting and recording rules with the specified names. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done. ```json $ curl http://localhost:9090/api/v1/rules diff --git a/web/api/v1/api.go b/web/api/v1/api.go index aeea87ca7..d95595804 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1296,6 +1296,16 @@ func (api *API) rules(r *http.Request) apiFuncResult { res := &RuleDiscovery{RuleGroups: make([]*RuleGroup, len(ruleGroups))} typ := strings.ToLower(r.URL.Query().Get("type")) + // Parse the rule names into a comma separated list of rule names, then create a set. 
+ rulesQuery := r.URL.Query().Get("rules") + ruleNamesSet := map[string]struct{}{} + if rulesQuery != "" { + names := strings.Split(rulesQuery, ",") + for _, rn := range names { + ruleNamesSet[strings.TrimSpace(rn)] = struct{}{} + } + } + if typ != "" && typ != "alert" && typ != "record" { return invalidParamError(errors.Errorf("not supported value %q", typ), "type") } @@ -1313,14 +1323,20 @@ func (api *API) rules(r *http.Request) apiFuncResult { EvaluationTime: grp.GetEvaluationTime().Seconds(), LastEvaluation: grp.GetLastEvaluation(), } - for _, r := range grp.Rules() { + for _, rr := range grp.Rules() { var enrichedRule Rule - lastError := "" - if r.LastError() != nil { - lastError = r.LastError().Error() + if len(ruleNamesSet) > 0 { + if _, ok := ruleNamesSet[rr.Name()]; !ok { + continue + } } - switch rule := r.(type) { + + lastError := "" + if rr.LastError() != nil { + lastError = rr.LastError().Error() + } + switch rule := rr.(type) { case *rules.AlertingRule: if !returnAlerts { break @@ -1358,11 +1374,16 @@ func (api *API) rules(r *http.Request) apiFuncResult { err := errors.Errorf("failed to assert type of rule '%v'", rule.Name()) return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil} } + if enrichedRule != nil { apiRuleGroup.Rules = append(apiRuleGroup.Rules, enrichedRule) } } - res.RuleGroups[i] = apiRuleGroup + + // If the rule group response has no rules, skip it - this means we filtered all the rules of this group. 
+ if len(apiRuleGroup.Rules) > 0 { + res.RuleGroups[i] = apiRuleGroup + } } return apiFuncResult{res, nil, nil, nil} } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index efce04221..e354bf298 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -1973,6 +1973,33 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E }, }, }, + { + endpoint: api.rules, + query: url.Values{"rules": []string{"test_metric4"}}, + response: &RuleDiscovery{ + RuleGroups: []*RuleGroup{ + { + Name: "grp", + File: "/path/to/file", + Interval: 1, + Limit: 0, + Rules: []Rule{ + AlertingRule{ + State: "inactive", + Name: "test_metric4", + Query: "up == 1", + Duration: 1, + Labels: labels.Labels{}, + Annotations: labels.Labels{}, + Alerts: []*Alert{}, + Health: "unknown", + Type: "alerting", + }, + }, + }, + }, + }, + }, { endpoint: api.queryExemplars, query: url.Values{ From b02811233170bc40fd43fc8a65313f0b64ee6191 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C4=90urica=20Yuri=20Nikoli=C4=87?= Date: Tue, 18 Apr 2023 12:13:05 +0200 Subject: [PATCH 082/632] Making the number of CPU cores used for sorting postings lists editable (#12247) Signed-off-by: Yuri Nikolic --- tsdb/head.go | 2 +- tsdb/index/postings.go | 14 ++++++++++---- tsdb/index/postings_test.go | 4 ++-- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/tsdb/head.go b/tsdb/head.go index 4696884f2..1d65d3229 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -574,7 +574,7 @@ const cardinalityCacheExpirationTime = time.Duration(30) * time.Second func (h *Head) Init(minValidTime int64) error { h.minValidTime.Store(minValidTime) defer func() { - h.postings.EnsureOrder() + h.postings.EnsureOrder(h.opts.WALReplayConcurrency) }() defer h.gc() // After loading the wal remove the obsolete data from the head. 
defer func() { diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index b55d70df0..15df374fc 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -224,7 +224,10 @@ func (p *MemPostings) All() Postings { // EnsureOrder ensures that all postings lists are sorted. After it returns all further // calls to add and addFor will insert new IDs in a sorted manner. -func (p *MemPostings) EnsureOrder() { +// Parameter numberOfConcurrentProcesses is used to specify the maximal number of +// CPU cores used for this operation. If it is <= 0, GOMAXPROCS is used. +// GOMAXPROCS was the default before introducing this parameter. +func (p *MemPostings) EnsureOrder(numberOfConcurrentProcesses int) { p.mtx.Lock() defer p.mtx.Unlock() @@ -232,13 +235,16 @@ func (p *MemPostings) EnsureOrder() { return } - n := runtime.GOMAXPROCS(0) + concurrency := numberOfConcurrentProcesses + if concurrency <= 0 { + concurrency = runtime.GOMAXPROCS(0) + } workc := make(chan *[][]storage.SeriesRef) var wg sync.WaitGroup - wg.Add(n) + wg.Add(concurrency) - for i := 0; i < n; i++ { + for i := 0; i < concurrency; i++ { go func() { for job := range workc { for _, l := range *job { diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index 1b1ecd3c3..a34f3c12d 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -54,7 +54,7 @@ func TestMemPostings_ensureOrder(t *testing.T) { p.m["a"][v] = l } - p.EnsureOrder() + p.EnsureOrder(0) for _, e := range p.m { for _, l := range e { @@ -114,7 +114,7 @@ func BenchmarkMemPostings_ensureOrder(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - p.EnsureOrder() + p.EnsureOrder(0) p.ordered = false } }) From 96b6463f2587f8c3da1031bfd5bc6b9aca575733 Mon Sep 17 00:00:00 2001 From: gotjosh Date: Tue, 18 Apr 2023 16:26:21 +0100 Subject: [PATCH 083/632] review comments Signed-off-by: gotjosh --- web/api/v1/api.go | 42 ++++++++++++++++++++++++++++++------------ web/api/v1/api_test.go | 34 
+++++++++++++++++++++++++++++++++- 2 files changed, 63 insertions(+), 13 deletions(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index d95595804..9a13e09d9 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1292,20 +1292,26 @@ type RecordingRule struct { } func (api *API) rules(r *http.Request) apiFuncResult { + if err := r.ParseForm(); err != nil { + return apiFuncResult{nil, &apiError{errorBadData, errors.Wrapf(err, "error parsing form values")}, nil, nil} + } + + queryFormToSet := func(values []string) map[string]struct{} { + set := make(map[string]struct{}, len(values)) + for _, v := range values { + set[v] = struct{}{} + } + return set + } + + rnSet := queryFormToSet(r.Form["rule_name[]"]) + rgSet := queryFormToSet(r.Form["rule_group[]"]) + fSet := queryFormToSet(r.Form["file[]"]) + ruleGroups := api.rulesRetriever(r.Context()).RuleGroups() res := &RuleDiscovery{RuleGroups: make([]*RuleGroup, len(ruleGroups))} typ := strings.ToLower(r.URL.Query().Get("type")) - // Parse the rule names into a comma separated list of rule names, then create a set. 
- rulesQuery := r.URL.Query().Get("rules") - ruleNamesSet := map[string]struct{}{} - if rulesQuery != "" { - names := strings.Split(rulesQuery, ",") - for _, rn := range names { - ruleNamesSet[strings.TrimSpace(rn)] = struct{}{} - } - } - if typ != "" && typ != "alert" && typ != "record" { return invalidParamError(errors.Errorf("not supported value %q", typ), "type") } @@ -1314,6 +1320,18 @@ func (api *API) rules(r *http.Request) apiFuncResult { returnRecording := typ == "" || typ == "record" for i, grp := range ruleGroups { + if len(rgSet) > 0 { + if _, ok := rgSet[grp.Name()]; !ok { + continue + } + } + + if len(fSet) > 0 { + if _, ok := fSet[grp.File()]; !ok { + continue + } + } + apiRuleGroup := &RuleGroup{ Name: grp.Name(), File: grp.File(), @@ -1326,8 +1344,8 @@ func (api *API) rules(r *http.Request) apiFuncResult { for _, rr := range grp.Rules() { var enrichedRule Rule - if len(ruleNamesSet) > 0 { - if _, ok := ruleNamesSet[rr.Name()]; !ok { + if len(rnSet) > 0 { + if _, ok := rnSet[rr.Name()]; !ok { continue } } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index e354bf298..c3e1bf59d 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -1975,7 +1975,39 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E }, { endpoint: api.rules, - query: url.Values{"rules": []string{"test_metric4"}}, + query: url.Values{"rule_name[]": []string{"test_metric4"}}, + response: &RuleDiscovery{ + RuleGroups: []*RuleGroup{ + { + Name: "grp", + File: "/path/to/file", + Interval: 1, + Limit: 0, + Rules: []Rule{ + AlertingRule{ + State: "inactive", + Name: "test_metric4", + Query: "up == 1", + Duration: 1, + Labels: labels.Labels{}, + Annotations: labels.Labels{}, + Alerts: []*Alert{}, + Health: "unknown", + Type: "alerting", + }, + }, + }, + }, + }, + }, + { + endpoint: api.rules, + query: url.Values{"rule_group[]": []string{"respond-with-nothing"}}, + response: &RuleDiscovery{RuleGroups: []*RuleGroup{nil}}, + }, + { + 
endpoint: api.rules, + query: url.Values{"file[]": []string{"/path/to/file"}, "rule_name[]": []string{"test_metric4"}}, response: &RuleDiscovery{ RuleGroups: []*RuleGroup{ { From e2a2790b2c830e903e28562d667b0a03adb3beeb Mon Sep 17 00:00:00 2001 From: gotjosh Date: Tue, 18 Apr 2023 16:50:16 +0100 Subject: [PATCH 084/632] add more docs Signed-off-by: gotjosh --- docs/querying/api.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index 0cc549b65..bc0587dd0 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -675,7 +675,9 @@ GET /api/v1/rules URL query parameters: - `type=alert|record`: return only the alerting rules (e.g. `type=alert`) or the recording rules (e.g. `type=record`). When the parameter is absent or empty, no filtering is done. -- `rules=alertName,RuleName`: return only the alerting and recording rules with the specified names. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done. +- `rule_name[]=`: return the groups and rules of the specified alerting and recording rules names, the parameter supports repetition. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done. +- `rule_group[]=`: return the group and rules of the specified rule group names, the parameter supports repetitions. When the parameter is absent or empty, no filtering is done. +- `file[]=`: return the group and rules of specified filepath for rule groups, the parameter supports repetition. When the parameter is absent or empty, no filtering is done. 
```json $ curl http://localhost:9090/api/v1/rules From 28909a46362737c218039dbf952d13e200b31c1f Mon Sep 17 00:00:00 2001 From: gotjosh Date: Tue, 18 Apr 2023 16:51:35 +0100 Subject: [PATCH 085/632] more worthsmithing Signed-off-by: gotjosh --- docs/querying/api.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index bc0587dd0..c6d8d2c83 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -675,9 +675,9 @@ GET /api/v1/rules URL query parameters: - `type=alert|record`: return only the alerting rules (e.g. `type=alert`) or the recording rules (e.g. `type=record`). When the parameter is absent or empty, no filtering is done. -- `rule_name[]=`: return the groups and rules of the specified alerting and recording rules names, the parameter supports repetition. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done. -- `rule_group[]=`: return the group and rules of the specified rule group names, the parameter supports repetitions. When the parameter is absent or empty, no filtering is done. -- `file[]=`: return the group and rules of specified filepath for rule groups, the parameter supports repetition. When the parameter is absent or empty, no filtering is done. +- `rule_name[]=`: return the groups and its rules of the specified alerting and recording rules names, the parameter supports repetition. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done. +- `rule_group[]=`: return the groups and its rules of the specified rule group names, the parameter supports repetitions. When the parameter is absent or empty, no filtering is done. +- `file[]=`: return the groups and its rules of specified filepath for rule groups, the parameter supports repetition. When the parameter is absent or empty, no filtering is done. 
```json $ curl http://localhost:9090/api/v1/rules From cf230bcd18bbbb429a46985049b049abb3437140 Mon Sep 17 00:00:00 2001 From: gotjosh Date: Wed, 19 Apr 2023 09:49:34 +0100 Subject: [PATCH 086/632] more wordsmithing Signed-off-by: gotjosh --- docs/querying/api.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index c6d8d2c83..820414fb1 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -675,9 +675,9 @@ GET /api/v1/rules URL query parameters: - `type=alert|record`: return only the alerting rules (e.g. `type=alert`) or the recording rules (e.g. `type=record`). When the parameter is absent or empty, no filtering is done. -- `rule_name[]=`: return the groups and its rules of the specified alerting and recording rules names, the parameter supports repetition. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done. -- `rule_group[]=`: return the groups and its rules of the specified rule group names, the parameter supports repetitions. When the parameter is absent or empty, no filtering is done. -- `file[]=`: return the groups and its rules of specified filepath for rule groups, the parameter supports repetition. When the parameter is absent or empty, no filtering is done. +- `rule_name[]=`: only return rules with the given rule name. If the parameter is repeated, rules with any of provided names are returned. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done. +- `rule_group[]=`: only return rules with the given rule group name. If the parameter is repeated, rules with any of provided rule group names are returned. When the parameter is absent or empty, no filtering is done. +- `file[]=`: only return rules with the given filepath. If the parameter is repeated, rules with any of provided filepaths are returned. 
When the parameter is absent or empty, no filtering is done. ```json $ curl http://localhost:9090/api/v1/rules From c3c7d44d845e08969845133a8c64c631ddbbb437 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 12 Apr 2023 13:05:41 +0200 Subject: [PATCH 087/632] lint: Adjust to the lint warnings raised by current versions of golint-ci MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We haven't updated golint-ci in our CI yet, but this commit prepares for that. There are a lot of new warnings, and it is mostly because the "revive" linter got updated. I agree with most of the new warnings, mostly around not naming unused function parameters (although it is justified in some cases for documentation purposes – while things like mocks are a good example where not naming the parameter is clearer). I'm pretty upset about the "empty block" warning to include `for` loops. It's such a common pattern to do something in the head of the `for` loop and then have an empty block. There is still an open issue about this: https://github.com/mgechev/revive/issues/810 I have disabled "revive" altogether in files where empty blocks are used excessively, and I have made the effort to add individual `// nolint:revive` where empty blocks are used just once or twice. It's borderline noisy, though, but let's go with it for now. I should mention that none of the "empty block" warnings for `for` loop bodies were legitimate. 
Signed-off-by: beorn7 --- cmd/prometheus/main.go | 1 + cmd/promtool/backfill_test.go | 2 +- cmd/promtool/rules.go | 2 +- cmd/promtool/rules_test.go | 4 ++-- discovery/aws/ec2.go | 2 +- discovery/hetzner/hcloud.go | 2 +- discovery/hetzner/robot.go | 4 ++-- discovery/ionos/server.go | 2 +- discovery/kubernetes/client_metrics.go | 6 +++--- discovery/kubernetes/endpoints.go | 1 + discovery/kubernetes/endpointslice.go | 2 +- discovery/kubernetes/ingress.go | 2 +- discovery/kubernetes/node.go | 2 +- discovery/kubernetes/pod.go | 2 +- discovery/kubernetes/service.go | 2 +- discovery/legacymanager/manager_test.go | 13 +++---------- discovery/manager_test.go | 17 +++++------------ discovery/nomad/nomad.go | 2 +- discovery/ovhcloud/dedicated_server.go | 2 +- discovery/ovhcloud/vps.go | 2 +- discovery/vultr/vultr.go | 4 +--- discovery/zookeeper/zookeeper.go | 2 +- model/textparse/promparse.go | 7 ++++--- promql/bench_test.go | 2 +- promql/engine.go | 2 +- promql/functions.go | 1 + promql/parser/lex.go | 3 ++- promql/parser/parse.go | 6 +++--- rules/manager.go | 7 ++++--- rules/manager_test.go | 8 ++++---- rules/origin_test.go | 22 +++++++++++----------- scrape/scrape_test.go | 4 ++-- scrape/target_test.go | 2 +- storage/buffer_test.go | 4 ++-- storage/fanout_test.go | 2 +- storage/interface.go | 6 +++--- storage/memoized_iterator_test.go | 4 ++-- storage/merge.go | 11 +++++------ storage/remote/codec_test.go | 2 +- storage/remote/queue_manager_test.go | 10 +++++----- storage/remote/write_handler_test.go | 2 +- tsdb/agent/db.go | 16 ++++++++-------- tsdb/chunkenc/float_histogram.go | 2 +- tsdb/chunkenc/float_histogram_test.go | 2 +- tsdb/chunkenc/histogram.go | 2 +- tsdb/chunkenc/histogram_test.go | 2 +- tsdb/chunkenc/xor.go | 6 +++--- tsdb/chunks/head_chunks_test.go | 4 ++-- tsdb/compact_test.go | 4 ++-- tsdb/db_test.go | 6 +++--- tsdb/exemplar.go | 12 ++++++------ tsdb/goversion/goversion_test.go | 2 +- tsdb/head_test.go | 7 ++++--- tsdb/head_wal.go | 1 + 
tsdb/ooo_head_read.go | 1 + tsdb/querier.go | 2 +- tsdb/querier_bench_test.go | 2 +- tsdb/querier_test.go | 1 + tsdb/wal.go | 1 + tsdb/wlog/reader_test.go | 2 +- tsdb/wlog/wlog_test.go | 2 +- util/logging/dedupe_test.go | 2 +- util/testutil/context.go | 2 +- util/testutil/roundtrip.go | 2 +- util/treecache/treecache.go | 2 +- web/api/v1/api.go | 14 +++++++------- web/api/v1/api_test.go | 6 +++--- web/api/v1/errors_test.go | 1 + web/web.go | 6 +++--- 69 files changed, 145 insertions(+), 150 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index f4f6af20d..559203646 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -12,6 +12,7 @@ // limitations under the License. // The main package for the Prometheus server executable. +// nolint:revive // Many unsued function arguments in this file by design. package main import ( diff --git a/cmd/promtool/backfill_test.go b/cmd/promtool/backfill_test.go index 2c551abeb..e6f7cad31 100644 --- a/cmd/promtool/backfill_test.go +++ b/cmd/promtool/backfill_test.go @@ -44,7 +44,7 @@ func sortSamples(samples []backfillSample) { }) } -func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample { +func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample { // nolint:revive ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*")) samples := []backfillSample{} for ss.Next() { diff --git a/cmd/promtool/rules.go b/cmd/promtool/rules.go index aedc7bcb9..43b76dbe4 100644 --- a/cmd/promtool/rules.go +++ b/cmd/promtool/rules.go @@ -68,7 +68,7 @@ func newRuleImporter(logger log.Logger, config ruleImporterConfig, apiClient que } // loadGroups parses groups from a list of recording rule files. 
-func (importer *ruleImporter) loadGroups(ctx context.Context, filenames []string) (errs []error) { +func (importer *ruleImporter) loadGroups(_ context.Context, filenames []string) (errs []error) { groups, errs := importer.ruleManager.LoadGroups(importer.config.evalInterval, labels.Labels{}, "", nil, filenames...) if errs != nil { return errs diff --git a/cmd/promtool/rules_test.go b/cmd/promtool/rules_test.go index fb582ed0d..213b7d2a0 100644 --- a/cmd/promtool/rules_test.go +++ b/cmd/promtool/rules_test.go @@ -35,7 +35,7 @@ type mockQueryRangeAPI struct { samples model.Matrix } -func (mockAPI mockQueryRangeAPI) QueryRange(ctx context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) { +func (mockAPI mockQueryRangeAPI) QueryRange(_ context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) { // nolint:revive return mockAPI.samples, v1.Warnings{}, nil } @@ -161,7 +161,7 @@ func TestBackfillRuleIntegration(t *testing.T) { } } -func newTestRuleImporter(ctx context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) { +func newTestRuleImporter(_ context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) { logger := log.NewNopLogger() cfg := ruleImporterConfig{ outputDir: tmpDir, diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go index ca9921159..86d76627e 100644 --- a/discovery/aws/ec2.go +++ b/discovery/aws/ec2.go @@ -164,7 +164,7 @@ func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger) *EC2Discovery { return d } -func (d *EC2Discovery) ec2Client(ctx context.Context) (*ec2.EC2, error) { +func (d *EC2Discovery) ec2Client(context.Context) (*ec2.EC2, error) { if d.ec2 != nil { return d.ec2, nil } diff --git a/discovery/hetzner/hcloud.go b/discovery/hetzner/hcloud.go index aa406a1a7..50afdc1ec 100644 --- a/discovery/hetzner/hcloud.go +++ 
b/discovery/hetzner/hcloud.go @@ -59,7 +59,7 @@ type hcloudDiscovery struct { } // newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets. -func newHcloudDiscovery(conf *SDConfig, logger log.Logger) (*hcloudDiscovery, error) { +func newHcloudDiscovery(conf *SDConfig, _ log.Logger) (*hcloudDiscovery, error) { d := &hcloudDiscovery{ port: conf.Port, } diff --git a/discovery/hetzner/robot.go b/discovery/hetzner/robot.go index 4b7abaf77..496088028 100644 --- a/discovery/hetzner/robot.go +++ b/discovery/hetzner/robot.go @@ -51,7 +51,7 @@ type robotDiscovery struct { } // newRobotDiscovery returns a new robotDiscovery which periodically refreshes its targets. -func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, error) { +func newRobotDiscovery(conf *SDConfig, _ log.Logger) (*robotDiscovery, error) { d := &robotDiscovery{ port: conf.Port, endpoint: conf.robotEndpoint, @@ -69,7 +69,7 @@ func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, erro return d, nil } -func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { +func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) { req, err := http.NewRequest("GET", d.endpoint+"/server", nil) if err != nil { return nil, err diff --git a/discovery/ionos/server.go b/discovery/ionos/server.go index 8ac363970..a850fbbfb 100644 --- a/discovery/ionos/server.go +++ b/discovery/ionos/server.go @@ -60,7 +60,7 @@ type serverDiscovery struct { datacenterID string } -func newServerDiscovery(conf *SDConfig, logger log.Logger) (*serverDiscovery, error) { +func newServerDiscovery(conf *SDConfig, _ log.Logger) (*serverDiscovery, error) { d := &serverDiscovery{ port: conf.Port, datacenterID: conf.DatacenterID, diff --git a/discovery/kubernetes/client_metrics.go b/discovery/kubernetes/client_metrics.go index 3a33e3e8d..b316f7d88 100644 --- a/discovery/kubernetes/client_metrics.go +++ 
b/discovery/kubernetes/client_metrics.go @@ -122,11 +122,11 @@ func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer ) } -func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code, method, host string) { +func (clientGoRequestMetricAdapter) Increment(_ context.Context, code, _, _ string) { clientGoRequestResultMetricVec.WithLabelValues(code).Inc() } -func (clientGoRequestMetricAdapter) Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) { +func (clientGoRequestMetricAdapter) Observe(_ context.Context, _ string, u url.URL, latency time.Duration) { clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds()) } @@ -169,7 +169,7 @@ func (f *clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetr return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name) } -func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { +func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(string) workqueue.CounterMetric { // Retries are not used so the metric is omitted. return noopMetric{} } diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index 039daf4fa..27742ab46 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// nolint:revive // Many legitimately empty blocks in this file. 
package kubernetes import ( diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index 135735154..29dc0be2f 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -190,7 +190,7 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group) } go func() { - for e.process(ctx, ch) { + for e.process(ctx, ch) { // nolint:revive } }() diff --git a/discovery/kubernetes/ingress.go b/discovery/kubernetes/ingress.go index 8c9249f54..ad47c341a 100644 --- a/discovery/kubernetes/ingress.go +++ b/discovery/kubernetes/ingress.go @@ -89,7 +89,7 @@ func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for i.process(ctx, ch) { + for i.process(ctx, ch) { // nolint:revive } }() diff --git a/discovery/kubernetes/node.go b/discovery/kubernetes/node.go index 93adf7825..16a06e7a0 100644 --- a/discovery/kubernetes/node.go +++ b/discovery/kubernetes/node.go @@ -96,7 +96,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for n.process(ctx, ch) { + for n.process(ctx, ch) { // nolint:revive } }() diff --git a/discovery/kubernetes/pod.go b/discovery/kubernetes/pod.go index 396720c22..732cf52ad 100644 --- a/discovery/kubernetes/pod.go +++ b/discovery/kubernetes/pod.go @@ -132,7 +132,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for p.process(ctx, ch) { + for p.process(ctx, ch) { // nolint:revive } }() diff --git a/discovery/kubernetes/service.go b/discovery/kubernetes/service.go index a19f06e7d..40e17679e 100644 --- a/discovery/kubernetes/service.go +++ b/discovery/kubernetes/service.go @@ -92,7 +92,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for s.process(ctx, ch) { + for s.process(ctx, ch) { // nolint:revive } }() diff --git a/discovery/legacymanager/manager_test.go b/discovery/legacymanager/manager_test.go 
index 57c82b72a..13b84e6e3 100644 --- a/discovery/legacymanager/manager_test.go +++ b/discovery/legacymanager/manager_test.go @@ -686,12 +686,7 @@ func TestTargetUpdatesOrder(t *testing.T) { case tgs := <-provUpdates: discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs) for _, got := range discoveryManager.allGroups() { - assertEqualGroups(t, got, tc.expectedTargets[x], func(got, expected string) string { - return fmt.Sprintf("%d: \ntargets mismatch \ngot: %v \nexpected: %v", - x, - got, - expected) - }) + assertEqualGroups(t, got, tc.expectedTargets[x]) } } } @@ -699,7 +694,7 @@ func TestTargetUpdatesOrder(t *testing.T) { } } -func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group, msg func(got, expected string) string) { +func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group) { t.Helper() // Need to sort by the groups's source as the received order is not guaranteed. @@ -1079,9 +1074,7 @@ func TestCoordinationWithReceiver(t *testing.T) { if _, ok := tgs[k]; !ok { t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs) } - assertEqualGroups(t, tgs[k], expected.tgs[k], func(got, expected string) string { - return fmt.Sprintf("step %d: targets mismatch \ngot: %q \nexpected: %q", i, got, expected) - }) + assertEqualGroups(t, tgs[k], expected.tgs[k]) } } } diff --git a/discovery/manager_test.go b/discovery/manager_test.go index 970168b0f..537160811 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -686,12 +686,7 @@ func TestTargetUpdatesOrder(t *testing.T) { case tgs := <-provUpdates: discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs) for _, got := range discoveryManager.allGroups() { - assertEqualGroups(t, got, tc.expectedTargets[x], func(got, expected string) string { - return fmt.Sprintf("%d: \ntargets mismatch \ngot: %v \nexpected: %v", - x, - got, - expected) - }) + assertEqualGroups(t, got, tc.expectedTargets[x]) } } } 
@@ -699,7 +694,7 @@ func TestTargetUpdatesOrder(t *testing.T) { } } -func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group, msg func(got, expected string) string) { +func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group) { t.Helper() // Need to sort by the groups's source as the received order is not guaranteed. @@ -1129,7 +1124,7 @@ type lockStaticConfig struct { } func (s lockStaticConfig) Name() string { return "lockstatic" } -func (s lockStaticConfig) NewDiscoverer(options DiscovererOptions) (Discoverer, error) { +func (s lockStaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) { return (lockStaticDiscoverer)(s), nil } @@ -1330,9 +1325,7 @@ func TestCoordinationWithReceiver(t *testing.T) { if _, ok := tgs[k]; !ok { t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs) } - assertEqualGroups(t, tgs[k], expected.tgs[k], func(got, expected string) string { - return fmt.Sprintf("step %d: targets mismatch \ngot: %q \nexpected: %q", i, got, expected) - }) + assertEqualGroups(t, tgs[k], expected.tgs[k]) } } } @@ -1399,7 +1392,7 @@ func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) { // TestTargetSetTargetGroupsUpdateDuringApplyConfig is used to detect races when // ApplyConfig happens at the same time as targets update. 
-func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) { +func TestTargetSetTargetGroupsUpdateDuringApplyConfig(*testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() discoveryManager := NewManager(ctx, log.NewNopLogger()) diff --git a/discovery/nomad/nomad.go b/discovery/nomad/nomad.go index c8d513039..7013f0737 100644 --- a/discovery/nomad/nomad.go +++ b/discovery/nomad/nomad.go @@ -161,7 +161,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { return d, nil } -func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { +func (d *Discovery) refresh(context.Context) ([]*targetgroup.Group, error) { opts := &nomad.QueryOptions{ AllowStale: d.allowStale, } diff --git a/discovery/ovhcloud/dedicated_server.go b/discovery/ovhcloud/dedicated_server.go index aeb4eccbb..bb5dadcd7 100644 --- a/discovery/ovhcloud/dedicated_server.go +++ b/discovery/ovhcloud/dedicated_server.go @@ -102,7 +102,7 @@ func (d *dedicatedServerDiscovery) getSource() string { return fmt.Sprintf("%s_%s", d.config.Name(), d.getService()) } -func (d *dedicatedServerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { +func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) { client, err := createClient(d.config) if err != nil { return nil, err diff --git a/discovery/ovhcloud/vps.go b/discovery/ovhcloud/vps.go index 705b42b65..e2d1dee36 100644 --- a/discovery/ovhcloud/vps.go +++ b/discovery/ovhcloud/vps.go @@ -117,7 +117,7 @@ func (d *vpsDiscovery) getSource() string { return fmt.Sprintf("%s_%s", d.config.Name(), d.getService()) } -func (d *vpsDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { +func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) { client, err := createClient(d.config) if err != nil { return nil, err diff --git a/discovery/vultr/vultr.go b/discovery/vultr/vultr.go index 2f489e7d4..42881d3c1 100644 
--- a/discovery/vultr/vultr.go +++ b/discovery/vultr/vultr.go @@ -202,10 +202,8 @@ func (d *Discovery) listInstances(ctx context.Context) ([]govultr.Instance, erro if meta.Links.Next == "" { break - } else { - listOptions.Cursor = meta.Links.Next - continue } + listOptions.Cursor = meta.Links.Next } return instances, nil diff --git a/discovery/zookeeper/zookeeper.go b/discovery/zookeeper/zookeeper.go index 308d63a5f..cadff5fd2 100644 --- a/discovery/zookeeper/zookeeper.go +++ b/discovery/zookeeper/zookeeper.go @@ -193,7 +193,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } for _, pathUpdate := range d.pathUpdates { // Drain event channel in case the treecache leaks goroutines otherwise. - for range pathUpdate { + for range pathUpdate { // nolint:revive } } d.conn.Close() diff --git a/model/textparse/promparse.go b/model/textparse/promparse.go index 2c981f050..94338a666 100644 --- a/model/textparse/promparse.go +++ b/model/textparse/promparse.go @@ -238,9 +238,10 @@ func (p *PromParser) Metric(l *labels.Labels) string { return s } -// Exemplar writes the exemplar of the current sample into the passed -// exemplar. It returns if an exemplar exists. -func (p *PromParser) Exemplar(e *exemplar.Exemplar) bool { +// Exemplar implements the Parser interface. However, since the classic +// Prometheus text format does not support exemplars, this implementation simply +// returns false and does nothing else. 
+func (p *PromParser) Exemplar(*exemplar.Exemplar) bool { return false } diff --git a/promql/bench_test.go b/promql/bench_test.go index 3d3588447..4ece16bfe 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -27,7 +27,7 @@ import ( "github.com/prometheus/prometheus/util/teststorage" ) -func setupRangeQueryTestData(stor *teststorage.TestStorage, engine *Engine, interval, numIntervals int) error { +func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *Engine, interval, numIntervals int) error { metrics := []labels.Labels{} metrics = append(metrics, labels.FromStrings("__name__", "a_one")) metrics = append(metrics, labels.FromStrings("__name__", "b_one")) diff --git a/promql/engine.go b/promql/engine.go index fae4f0932..21b894b93 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1957,7 +1957,7 @@ func (ev *evaluator) matrixIterSlice( // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; floats[drop].T < mint; drop++ { + for drop = 0; floats[drop].T < mint; drop++ { // nolint:revive } ev.currentSamples -= drop copy(floats, floats[drop:]) diff --git a/promql/functions.go b/promql/functions.go index fd99703df..0c22cb44c 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// nolint:revive // Many unused function arguments in this file by design. package promql import ( diff --git a/promql/parser/lex.go b/promql/parser/lex.go index 7b6a6b027..657dc2809 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// nolint:revive // Many legitimately empty blocks in this file. 
package parser import ( @@ -293,7 +294,7 @@ func (l *Lexer) accept(valid string) bool { // acceptRun consumes a run of runes from the valid set. func (l *Lexer) acceptRun(valid string) { for strings.ContainsRune(valid, l.next()) { - // consume + // Consume. } l.backup() } diff --git a/promql/parser/parse.go b/promql/parser/parse.go index 6c37ce6fc..3c32f3c05 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -332,7 +332,7 @@ func (p *parser) Lex(lval *yySymType) int { // It is a no-op since the parsers error routines are triggered // by mechanisms that allow more fine-grained control // For more information, see https://pkg.go.dev/golang.org/x/tools/cmd/goyacc. -func (p *parser) Error(e string) { +func (p *parser) Error(string) { } // InjectItem allows injecting a single Item at the beginning of the token stream @@ -481,9 +481,9 @@ func (p *parser) checkAST(node Node) (typ ValueType) { // This is made a function instead of a variable, so it is lazily evaluated on demand. opRange := func() (r PositionRange) { // Remove whitespace at the beginning and end of the range. - for r.Start = n.LHS.PositionRange().End; isSpace(rune(p.lex.input[r.Start])); r.Start++ { + for r.Start = n.LHS.PositionRange().End; isSpace(rune(p.lex.input[r.Start])); r.Start++ { // nolint:revive } - for r.End = n.RHS.PositionRange().Start - 1; isSpace(rune(p.lex.input[r.End])); r.End-- { + for r.End = n.RHS.PositionRange().Start - 1; isSpace(rune(p.lex.input[r.End])); r.End-- { // nolint:revive } return } diff --git a/rules/manager.go b/rules/manager.go index b6513e82d..31c90e9e9 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -866,12 +866,13 @@ func (g *Group) RestoreForState(ts time.Time) { timeSpentPending := downAt.Sub(restoredActiveAt) timeRemainingPending := alertHoldDuration - timeSpentPending - if timeRemainingPending <= 0 { + switch { + case timeRemainingPending <= 0: // It means that alert was firing when prometheus went down. 
// In the next Eval, the state of this alert will be set back to // firing again if it's still firing in that Eval. // Nothing to be done in this case. - } else if timeRemainingPending < g.opts.ForGracePeriod { + case timeRemainingPending < g.opts.ForGracePeriod: // (new) restoredActiveAt = (ts + m.opts.ForGracePeriod) - alertHoldDuration // /* new firing time */ /* moving back by hold duration */ // @@ -884,7 +885,7 @@ func (g *Group) RestoreForState(ts time.Time) { // = (ts + m.opts.ForGracePeriod) - ts // = m.opts.ForGracePeriod restoredActiveAt = ts.Add(g.opts.ForGracePeriod).Add(-alertHoldDuration) - } else { + default: // By shifting ActiveAt to the future (ActiveAt + some_duration), // the total pending time from the original ActiveAt // would be `alertHoldDuration + some_duration`. diff --git a/rules/manager_test.go b/rules/manager_test.go index 440e06c9a..16bb080f5 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -779,13 +779,13 @@ func TestUpdate(t *testing.T) { rgs.Groups[i].Interval = model.Duration(10) } } - reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs) + reloadAndValidate(rgs, t, tmpFile, ruleManager, ogs) // Update limit and reload. for i := range rgs.Groups { rgs.Groups[i].Limit = 1 } - reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs) + reloadAndValidate(rgs, t, tmpFile, ruleManager, ogs) // Change group rules and reload. for i, g := range rgs.Groups { @@ -793,7 +793,7 @@ func TestUpdate(t *testing.T) { rgs.Groups[i].Rules[j].Expr.SetString(fmt.Sprintf("%s * 0", r.Expr.Value)) } } - reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs) + reloadAndValidate(rgs, t, tmpFile, ruleManager, ogs) } // ruleGroupsTest for running tests over rules. 
@@ -836,7 +836,7 @@ func formatRules(r *rulefmt.RuleGroups) ruleGroupsTest { } } -func reloadAndValidate(rgs *rulefmt.RuleGroups, t *testing.T, tmpFile *os.File, ruleManager *Manager, expected map[string]labels.Labels, ogs map[string]*Group) { +func reloadAndValidate(rgs *rulefmt.RuleGroups, t *testing.T, tmpFile *os.File, ruleManager *Manager, ogs map[string]*Group) { bs, err := yaml.Marshal(formatRules(rgs)) require.NoError(t, err) tmpFile.Seek(0, 0) diff --git a/rules/origin_test.go b/rules/origin_test.go index dd8e47f74..ea4f4f905 100644 --- a/rules/origin_test.go +++ b/rules/origin_test.go @@ -30,19 +30,19 @@ type unknownRule struct{} func (u unknownRule) Name() string { return "" } func (u unknownRule) Labels() labels.Labels { return labels.EmptyLabels() } -func (u unknownRule) Eval(ctx context.Context, time time.Time, queryFunc QueryFunc, url *url.URL, i int) (promql.Vector, error) { +func (u unknownRule) Eval(context.Context, time.Time, QueryFunc, *url.URL, int) (promql.Vector, error) { return nil, nil } -func (u unknownRule) String() string { return "" } -func (u unknownRule) Query() parser.Expr { return nil } -func (u unknownRule) SetLastError(err error) {} -func (u unknownRule) LastError() error { return nil } -func (u unknownRule) SetHealth(health RuleHealth) {} -func (u unknownRule) Health() RuleHealth { return "" } -func (u unknownRule) SetEvaluationDuration(duration time.Duration) {} -func (u unknownRule) GetEvaluationDuration() time.Duration { return 0 } -func (u unknownRule) SetEvaluationTimestamp(time time.Time) {} -func (u unknownRule) GetEvaluationTimestamp() time.Time { return time.Time{} } +func (u unknownRule) String() string { return "" } +func (u unknownRule) Query() parser.Expr { return nil } +func (u unknownRule) SetLastError(error) {} +func (u unknownRule) LastError() error { return nil } +func (u unknownRule) SetHealth(RuleHealth) {} +func (u unknownRule) Health() RuleHealth { return "" } +func (u unknownRule) 
SetEvaluationDuration(time.Duration) {} +func (u unknownRule) GetEvaluationDuration() time.Duration { return 0 } +func (u unknownRule) SetEvaluationTimestamp(time.Time) {} +func (u unknownRule) GetEvaluationTimestamp() time.Time { return time.Time{} } func TestNewRuleDetailPanics(t *testing.T) { require.PanicsWithValue(t, `unknown rule type "rules.unknownRule"`, func() { diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index dcb3b48c1..a8028b652 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -2405,7 +2405,7 @@ type testScraper struct { scrapeFunc func(context.Context, io.Writer) error } -func (ts *testScraper) offset(interval time.Duration, jitterSeed uint64) time.Duration { +func (ts *testScraper) offset(time.Duration, uint64) time.Duration { return ts.offsetDur } @@ -2867,7 +2867,7 @@ func TestScrapeAddFast(t *testing.T) { require.NoError(t, slApp.Commit()) } -func TestReuseCacheRace(t *testing.T) { +func TestReuseCacheRace(*testing.T) { var ( app = &nopAppendable{} cfg = &config.ScrapeConfig{ diff --git a/scrape/target_test.go b/scrape/target_test.go index 2bc3f000c..9d25df414 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -134,7 +134,7 @@ func TestTargetURL(t *testing.T) { require.Equal(t, expectedURL, target.URL()) } -func newTestTarget(targetURL string, deadline time.Duration, lbls labels.Labels) *Target { +func newTestTarget(targetURL string, _ time.Duration, lbls labels.Labels) *Target { lb := labels.NewBuilder(lbls) lb.Set(model.SchemeLabel, "http") lb.Set(model.AddressLabel, strings.TrimPrefix(targetURL, "http://")) diff --git a/storage/buffer_test.go b/storage/buffer_test.go index ebe24d8df..fcb43c273 100644 --- a/storage/buffer_test.go +++ b/storage/buffer_test.go @@ -188,8 +188,8 @@ func BenchmarkBufferedSeriesIterator(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for it.Next() != chunkenc.ValNone { - // scan everything + for it.Next() != chunkenc.ValNone { // nolint:revive + // Scan everything. 
} require.NoError(b, it.Err()) } diff --git a/storage/fanout_test.go b/storage/fanout_test.go index 4996e8f64..b4490636d 100644 --- a/storage/fanout_test.go +++ b/storage/fanout_test.go @@ -233,7 +233,7 @@ func (errQuerier) Select(bool, *storage.SelectHints, ...*labels.Matcher) storage return storage.ErrSeriesSet(errSelect) } -func (errQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (errQuerier) LabelValues(string, ...*labels.Matcher) ([]string, storage.Warnings, error) { return nil, nil, errors.New("label values error") } diff --git a/storage/interface.go b/storage/interface.go index 5cf70a351..b282f1fc6 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -99,7 +99,7 @@ type MockQueryable struct { MockQuerier Querier } -func (q *MockQueryable) Querier(ctx context.Context, mint, maxt int64) (Querier, error) { +func (q *MockQueryable) Querier(context.Context, int64, int64) (Querier, error) { return q.MockQuerier, nil } @@ -118,11 +118,11 @@ type MockQuerier struct { SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet } -func (q *MockQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, Warnings, error) { +func (q *MockQuerier) LabelValues(string, ...*labels.Matcher) ([]string, Warnings, error) { return nil, nil, nil } -func (q *MockQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, Warnings, error) { +func (q *MockQuerier) LabelNames(...*labels.Matcher) ([]string, Warnings, error) { return nil, nil, nil } diff --git a/storage/memoized_iterator_test.go b/storage/memoized_iterator_test.go index 382c84e63..d996436e0 100644 --- a/storage/memoized_iterator_test.go +++ b/storage/memoized_iterator_test.go @@ -82,8 +82,8 @@ func BenchmarkMemoizedSeriesIterator(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for it.Next() != chunkenc.ValNone { - // scan everything + for it.Next() != chunkenc.ValNone { // nolint:revive + // Scan 
everything. } require.NoError(b, it.Err()) } diff --git a/storage/merge.go b/storage/merge.go index 8db1f7ae8..193a02522 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -722,12 +722,11 @@ func (c *compactChunkIterator) Next() bool { break } - if next.MinTime == prev.MinTime && - next.MaxTime == prev.MaxTime && - bytes.Equal(next.Chunk.Bytes(), prev.Chunk.Bytes()) { - // 1:1 duplicates, skip it. - } else { - // We operate on same series, so labels does not matter here. + // Only do something if it is not a perfect duplicate. + if next.MinTime != prev.MinTime || + next.MaxTime != prev.MaxTime || + !bytes.Equal(next.Chunk.Bytes(), prev.Chunk.Bytes()) { + // We operate on same series, so labels do not matter here. overlapping = append(overlapping, newChunkToSeriesDecoder(labels.EmptyLabels(), next)) if next.MaxTime > oMaxTime { oMaxTime = next.MaxTime diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go index 36d0d7c31..27e2cc704 100644 --- a/storage/remote/codec_test.go +++ b/storage/remote/codec_test.go @@ -524,7 +524,7 @@ func TestDecodeWriteRequest(t *testing.T) { require.Equal(t, writeRequestFixture, actual) } -func TestNilHistogramProto(t *testing.T) { +func TestNilHistogramProto(*testing.T) { // This function will panic if it impromperly handles nil // values, causing the test to fail. HistogramProtoToHistogram(prompb.Histogram{}) diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index 5ec52de6b..a57c3bf7b 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -362,7 +362,7 @@ func TestReshard(t *testing.T) { c.waitForExpectedData(t) } -func TestReshardRaceWithStop(t *testing.T) { +func TestReshardRaceWithStop(*testing.T) { c := NewTestWriteClient() var m *QueueManager h := sync.Mutex{} @@ -864,10 +864,10 @@ func (c *TestBlockingWriteClient) Endpoint() string { // For benchmarking the send and not the receive side. 
type NopWriteClient struct{} -func NewNopWriteClient() *NopWriteClient { return &NopWriteClient{} } -func (c *NopWriteClient) Store(_ context.Context, req []byte) error { return nil } -func (c *NopWriteClient) Name() string { return "nopwriteclient" } -func (c *NopWriteClient) Endpoint() string { return "http://test-remote.com/1234" } +func NewNopWriteClient() *NopWriteClient { return &NopWriteClient{} } +func (c *NopWriteClient) Store(context.Context, []byte) error { return nil } +func (c *NopWriteClient) Name() string { return "nopwriteclient" } +func (c *NopWriteClient) Endpoint() string { return "http://test-remote.com/1234" } func BenchmarkSampleSend(b *testing.B) { // Send one sample per series, which is the typical remote_write case diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index 9c787f17e..e7a88ddc2 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -294,7 +294,7 @@ func (m *mockAppendable) AppendExemplar(_ storage.SeriesRef, l labels.Labels, e return 0, nil } -func (m *mockAppendable) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { +func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { if t < m.latestHistogram { return 0, storage.ErrOutOfOrderSample } diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index a17e0d1b9..cb075f306 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -732,22 +732,22 @@ func (db *DB) StartTime() (int64, error) { } // Querier implements the Storage interface. -func (db *DB) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { +func (db *DB) Querier(context.Context, int64, int64) (storage.Querier, error) { return nil, ErrUnsupported } // ChunkQuerier implements the Storage interface. 
-func (db *DB) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { +func (db *DB) ChunkQuerier(context.Context, int64, int64) (storage.ChunkQuerier, error) { return nil, ErrUnsupported } // ExemplarQuerier implements the Storage interface. -func (db *DB) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) { +func (db *DB) ExemplarQuerier(context.Context) (storage.ExemplarQuerier, error) { return nil, ErrUnsupported } // Appender implements storage.Storage. -func (db *DB) Appender(_ context.Context) storage.Appender { +func (db *DB) Appender(context.Context) storage.Appender { return db.appenderPool.Get().(storage.Appender) } @@ -823,7 +823,7 @@ func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v flo return 0, storage.ErrOutOfOrderSample } - // NOTE: always modify pendingSamples and sampleSeries together + // NOTE: always modify pendingSamples and sampleSeries together. a.pendingSamples = append(a.pendingSamples, record.RefSample{ Ref: series.ref, T: t, @@ -849,8 +849,8 @@ func (a *appender) getOrCreate(l labels.Labels) (series *memSeries, created bool return series, true } -func (a *appender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { - // series references and chunk references are identical for agent mode. +func (a *appender) AppendExemplar(ref storage.SeriesRef, _ labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { + // Series references and chunk references are identical for agent mode. 
headRef := chunks.HeadSeriesRef(ref) s := a.series.GetByID(headRef) @@ -973,7 +973,7 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int return storage.SeriesRef(series.ref), nil } -func (a *appender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { +func (a *appender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) { // TODO: Wire metadata in the Agent's appender. return 0, nil } diff --git a/tsdb/chunkenc/float_histogram.go b/tsdb/chunkenc/float_histogram.go index b462c6d9f..67b706b2e 100644 --- a/tsdb/chunkenc/float_histogram.go +++ b/tsdb/chunkenc/float_histogram.go @@ -107,7 +107,7 @@ func (c *FloatHistogramChunk) Appender() (Appender, error) { // To get an appender, we must know the state it would have if we had // appended all existing data from scratch. We iterate through the end // and populate via the iterator's state. - for it.Next() == ValFloatHistogram { + for it.Next() == ValFloatHistogram { // nolint:revive } if err := it.Err(); err != nil { return nil, err diff --git a/tsdb/chunkenc/float_histogram_test.go b/tsdb/chunkenc/float_histogram_test.go index 31d96ee7a..90c16d1ea 100644 --- a/tsdb/chunkenc/float_histogram_test.go +++ b/tsdb/chunkenc/float_histogram_test.go @@ -111,7 +111,7 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) { // 3. Now recycle an iterator that was never used to access anything. itX := c.Iterator(nil) - for itX.Next() == ValFloatHistogram { + for itX.Next() == ValFloatHistogram { // nolint:revive // Just iterate through without accessing anything. 
} it3 := c.iterator(itX) diff --git a/tsdb/chunkenc/histogram.go b/tsdb/chunkenc/histogram.go index 7b6a9cacb..0d53f1f49 100644 --- a/tsdb/chunkenc/histogram.go +++ b/tsdb/chunkenc/histogram.go @@ -126,7 +126,7 @@ func (c *HistogramChunk) Appender() (Appender, error) { // To get an appender, we must know the state it would have if we had // appended all existing data from scratch. We iterate through the end // and populate via the iterator's state. - for it.Next() == ValHistogram { + for it.Next() == ValHistogram { // nolint:revive } if err := it.Err(); err != nil { return nil, err diff --git a/tsdb/chunkenc/histogram_test.go b/tsdb/chunkenc/histogram_test.go index 4bb146ccd..45f31a3b4 100644 --- a/tsdb/chunkenc/histogram_test.go +++ b/tsdb/chunkenc/histogram_test.go @@ -116,7 +116,7 @@ func TestHistogramChunkSameBuckets(t *testing.T) { // 3. Now recycle an iterator that was never used to access anything. itX := c.Iterator(nil) - for itX.Next() == ValHistogram { + for itX.Next() == ValHistogram { // nolint:revive // Just iterate through without accessing anything. } it3 := c.iterator(itX) diff --git a/tsdb/chunkenc/xor.go b/tsdb/chunkenc/xor.go index 2fa2f613c..ba2d96d36 100644 --- a/tsdb/chunkenc/xor.go +++ b/tsdb/chunkenc/xor.go @@ -99,7 +99,7 @@ func (c *XORChunk) Appender() (Appender, error) { // To get an appender we must know the state it would have if we had // appended all existing data from scratch. // We iterate through the end and populate via the iterator's state. 
- for it.Next() != ValNone { + for it.Next() != ValNone { // nolint:revive } if err := it.Err(); err != nil { return nil, err @@ -152,11 +152,11 @@ type xorAppender struct { trailing uint8 } -func (a *xorAppender) AppendHistogram(t int64, h *histogram.Histogram) { +func (a *xorAppender) AppendHistogram(int64, *histogram.Histogram) { panic("appended a histogram to an xor chunk") } -func (a *xorAppender) AppendFloatHistogram(t int64, h *histogram.FloatHistogram) { +func (a *xorAppender) AppendFloatHistogram(int64, *histogram.FloatHistogram) { panic("appended a float histogram to an xor chunk") } diff --git a/tsdb/chunks/head_chunks_test.go b/tsdb/chunks/head_chunks_test.go index ac89ae3e5..20a4c2064 100644 --- a/tsdb/chunks/head_chunks_test.go +++ b/tsdb/chunks/head_chunks_test.go @@ -503,10 +503,10 @@ func createChunkDiskMapper(t *testing.T, dir string) *ChunkDiskMapper { func randomChunk(t *testing.T) chunkenc.Chunk { chunk := chunkenc.NewXORChunk() - len := rand.Int() % 120 + length := rand.Int() % 120 app, err := chunk.Appender() require.NoError(t, err) - for i := 0; i < len; i++ { + for i := 0; i < length; i++ { app.Append(rand.Int63(), rand.Float64()) } return chunk diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index d9a105f22..93bca179e 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -467,8 +467,8 @@ func (erringBReader) Size() int64 { return 0 } type nopChunkWriter struct{} -func (nopChunkWriter) WriteChunks(chunks ...chunks.Meta) error { return nil } -func (nopChunkWriter) Close() error { return nil } +func (nopChunkWriter) WriteChunks(...chunks.Meta) error { return nil } +func (nopChunkWriter) Close() error { return nil } func samplesForRange(minTime, maxTime int64, maxSamplesPerChunk int) (ret [][]sample) { var curr []sample diff --git a/tsdb/db_test.go b/tsdb/db_test.go index c54fccf6f..3e4d35f87 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -1426,11 +1426,11 @@ type mockCompactorFailing struct { max int } -func 
(*mockCompactorFailing) Plan(dir string) ([]string, error) { +func (*mockCompactorFailing) Plan(string) ([]string, error) { return nil, nil } -func (c *mockCompactorFailing) Write(dest string, b BlockReader, mint, maxt int64, parent *BlockMeta) (ulid.ULID, error) { +func (c *mockCompactorFailing) Write(dest string, _ BlockReader, _, _ int64, _ *BlockMeta) (ulid.ULID, error) { if len(c.blocks) >= c.max { return ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail") } @@ -1458,7 +1458,7 @@ func (*mockCompactorFailing) Compact(string, []string, []*Block) (ulid.ULID, err return ulid.ULID{}, nil } -func (*mockCompactorFailing) CompactOOO(dest string, oooHead *OOOCompactionHead) (result []ulid.ULID, err error) { +func (*mockCompactorFailing) CompactOOO(string, *OOOCompactionHead) (result []ulid.ULID, err error) { return nil, fmt.Errorf("mock compaction failing CompactOOO") } diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index 5ba3567e4..ca108ae91 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -115,17 +115,17 @@ func NewExemplarMetrics(reg prometheus.Registerer) *ExemplarMetrics { // 1GB of extra memory, accounting for the fact that this is heap allocated space. // If len <= 0, then the exemplar storage is essentially a noop storage but can later be // resized to store exemplars. 
-func NewCircularExemplarStorage(len int64, m *ExemplarMetrics) (ExemplarStorage, error) { - if len < 0 { - len = 0 +func NewCircularExemplarStorage(length int64, m *ExemplarMetrics) (ExemplarStorage, error) { + if length < 0 { + length = 0 } c := &CircularExemplarStorage{ - exemplars: make([]*circularBufferEntry, len), - index: make(map[string]*indexEntry, len/estimatedExemplarsPerSeries), + exemplars: make([]*circularBufferEntry, length), + index: make(map[string]*indexEntry, length/estimatedExemplarsPerSeries), metrics: m, } - c.metrics.maxExemplars.Set(float64(len)) + c.metrics.maxExemplars.Set(float64(length)) return c, nil } diff --git a/tsdb/goversion/goversion_test.go b/tsdb/goversion/goversion_test.go index 9a7486d66..853844fb9 100644 --- a/tsdb/goversion/goversion_test.go +++ b/tsdb/goversion/goversion_test.go @@ -24,4 +24,4 @@ import ( // // The blank import above is actually what invokes the test of this package. If // the import succeeds (the code compiles), the test passed. -func Test(t *testing.T) {} +func Test(*testing.T) {} diff --git a/tsdb/head_test.go b/tsdb/head_test.go index e80c197b2..39bcf4c78 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// nolint:revive // Many legitimately empty blocks in this file. package tsdb import ( @@ -103,7 +104,7 @@ func BenchmarkHeadAppender_Append_Commit_ExistingSeries(b *testing.B) { b.Cleanup(func() { require.NoError(b, h.Close()) }) ts := int64(1000) - append := func() error { + appendSamples := func() error { var err error app := h.Appender(context.Background()) for _, s := range series[:seriesCount] { @@ -120,13 +121,13 @@ func BenchmarkHeadAppender_Append_Commit_ExistingSeries(b *testing.B) { } // Init series, that's not what we're benchmarking here. 
- require.NoError(b, append()) + require.NoError(b, appendSamples()) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - require.NoError(b, append()) + require.NoError(b, appendSamples()) } }) } diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 6a8a30d5a..4da500a71 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// nolint:revive // Many legitimately empty blocks in this file. package tsdb import ( diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index 86d0e3b7b..fd16c6ca8 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// nolint:revive // Many unused function arguments in this file by design. package tsdb import ( diff --git a/tsdb/querier.go b/tsdb/querier.go index 4b3144c71..f3852d517 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -1085,7 +1085,7 @@ func newNopChunkReader() ChunkReader { } } -func (cr nopChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) { +func (cr nopChunkReader) Chunk(chunks.Meta) (chunkenc.Chunk, error) { return cr.emptyChunk, nil } diff --git a/tsdb/querier_bench_test.go b/tsdb/querier_bench_test.go index 78993d105..89758a1d3 100644 --- a/tsdb/querier_bench_test.go +++ b/tsdb/querier_bench_test.go @@ -250,7 +250,7 @@ func BenchmarkQuerierSelect(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { ss := q.Select(sorted, nil, matcher) - for ss.Next() { + for ss.Next() { // nolint:revive } require.NoError(b, ss.Err()) } diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index fa3dd2418..3a92e4951 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+// nolint:revive // Many unused function arguments in this file by design. package tsdb import ( diff --git a/tsdb/wal.go b/tsdb/wal.go index e0bc1ec69..93d445a12 100644 --- a/tsdb/wal.go +++ b/tsdb/wal.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// nolint:revive // Many unused function arguments in this file by design. package tsdb import ( diff --git a/tsdb/wlog/reader_test.go b/tsdb/wlog/reader_test.go index 97d251b3a..737520e76 100644 --- a/tsdb/wlog/reader_test.go +++ b/tsdb/wlog/reader_test.go @@ -533,7 +533,7 @@ func TestReaderData(t *testing.T) { require.NoError(t, err) reader := fn(sr) - for reader.Next() { + for reader.Next() { // nolint:revive } require.NoError(t, reader.Err()) diff --git a/tsdb/wlog/wlog_test.go b/tsdb/wlog/wlog_test.go index ed8a9df2e..d36934bbd 100644 --- a/tsdb/wlog/wlog_test.go +++ b/tsdb/wlog/wlog_test.go @@ -164,7 +164,7 @@ func TestWALRepair_ReadingError(t *testing.T) { sr := NewSegmentBufReader(s) require.NoError(t, err) r := NewReader(sr) - for r.Next() { + for r.Next() { // nolint:revive } // Close the segment so we don't break things on Windows. 
diff --git a/util/logging/dedupe_test.go b/util/logging/dedupe_test.go index ad234445b..e05d6454c 100644 --- a/util/logging/dedupe_test.go +++ b/util/logging/dedupe_test.go @@ -22,7 +22,7 @@ import ( type counter int -func (c *counter) Log(keyvals ...interface{}) error { +func (c *counter) Log(...interface{}) error { (*c)++ return nil } diff --git a/util/testutil/context.go b/util/testutil/context.go index cf730421b..c1f4a831c 100644 --- a/util/testutil/context.go +++ b/util/testutil/context.go @@ -37,6 +37,6 @@ func (c *MockContext) Err() error { } // Value ignores the Value and always returns nil -func (c *MockContext) Value(key interface{}) interface{} { +func (c *MockContext) Value(interface{}) interface{} { return nil } diff --git a/util/testutil/roundtrip.go b/util/testutil/roundtrip.go index a93991a13..364e0c264 100644 --- a/util/testutil/roundtrip.go +++ b/util/testutil/roundtrip.go @@ -22,7 +22,7 @@ type roundTrip struct { theError error } -func (rt *roundTrip) RoundTrip(r *http.Request) (*http.Response, error) { +func (rt *roundTrip) RoundTrip(*http.Request) (*http.Response, error) { return rt.theResponse, rt.theError } diff --git a/util/treecache/treecache.go b/util/treecache/treecache.go index 7dd41dced..acdd6f7be 100644 --- a/util/treecache/treecache.go +++ b/util/treecache/treecache.go @@ -116,7 +116,7 @@ func (tc *ZookeeperTreeCache) Stop() { tc.stop <- struct{}{} go func() { // Drain tc.head.events so that go routines can make progress and exit. 
- for range tc.head.events { + for range tc.head.events { // nolint:revive } }() go func() { diff --git a/web/api/v1/api.go b/web/api/v1/api.go index dde814eb0..0624cf2d8 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -117,7 +117,7 @@ type RulesRetriever interface { type StatsRenderer func(context.Context, *stats.Statistics, string) stats.QueryStats -func defaultStatsRenderer(ctx context.Context, s *stats.Statistics, param string) stats.QueryStats { +func defaultStatsRenderer(_ context.Context, s *stats.Statistics, param string) stats.QueryStats { if param != "" { return stats.NewQueryStats(s) } @@ -392,7 +392,7 @@ func invalidParamError(err error, parameter string) apiFuncResult { }, nil, nil} } -func (api *API) options(r *http.Request) apiFuncResult { +func (api *API) options(*http.Request) apiFuncResult { return apiFuncResult{nil, nil, nil, nil} } @@ -1565,7 +1565,7 @@ func (api *API) snapshot(r *http.Request) apiFuncResult { }{name}, nil, nil, nil} } -func (api *API) cleanTombstones(r *http.Request) apiFuncResult { +func (api *API) cleanTombstones(*http.Request) apiFuncResult { if !api.enableAdmin { return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil} } @@ -1764,7 +1764,7 @@ func marshalSeriesJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { stream.WriteObjectEnd() } -func marshalSeriesJSONIsEmpty(ptr unsafe.Pointer) bool { +func marshalSeriesJSONIsEmpty(unsafe.Pointer) bool { return false } @@ -1817,7 +1817,7 @@ func marshalSampleJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { stream.WriteObjectEnd() } -func marshalSampleJSONIsEmpty(ptr unsafe.Pointer) bool { +func marshalSampleJSONIsEmpty(unsafe.Pointer) bool { return false } @@ -1841,7 +1841,7 @@ func marshalHPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { stream.WriteArrayEnd() } -func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool { +func marshalPointJSONIsEmpty(unsafe.Pointer) bool { return false } @@ -1878,6 +1878,6 @@ func 
marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { stream.WriteObjectEnd() } -func marshalExemplarJSONEmpty(ptr unsafe.Pointer) bool { +func marshalExemplarJSONEmpty(unsafe.Pointer) bool { return false } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index efce04221..04aab31af 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -2560,9 +2560,9 @@ type fakeDB struct { err error } -func (f *fakeDB) CleanTombstones() error { return f.err } -func (f *fakeDB) Delete(mint, maxt int64, ms ...*labels.Matcher) error { return f.err } -func (f *fakeDB) Snapshot(dir string, withHead bool) error { return f.err } +func (f *fakeDB) CleanTombstones() error { return f.err } +func (f *fakeDB) Delete(int64, int64, ...*labels.Matcher) error { return f.err } +func (f *fakeDB) Snapshot(string, bool) error { return f.err } func (f *fakeDB) Stats(statsByLabelName string) (_ *tsdb.Stats, retErr error) { dbDir, err := os.MkdirTemp("", "tsdb-api-ready") if err != nil { return nil, err } diff --git a/web/api/v1/errors_test.go b/web/api/v1/errors_test.go index da0d4b3f2..4947afd81 100644 --- a/web/api/v1/errors_test.go +++ b/web/api/v1/errors_test.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// nolint:revive // Many unused function arguments in this file by design. 
package v1 import ( diff --git a/web/web.go b/web/web.go index 9d63094f6..cac41502d 100644 --- a/web/web.go +++ b/web/web.go @@ -755,14 +755,14 @@ func toFloat64(f *io_prometheus_client.MetricFamily) float64 { return math.NaN() } -func (h *Handler) version(w http.ResponseWriter, r *http.Request) { +func (h *Handler) version(w http.ResponseWriter, _ *http.Request) { dec := json.NewEncoder(w) if err := dec.Encode(h.versionInfo); err != nil { http.Error(w, fmt.Sprintf("error encoding JSON: %s", err), http.StatusInternalServerError) } } -func (h *Handler) quit(w http.ResponseWriter, r *http.Request) { +func (h *Handler) quit(w http.ResponseWriter, _ *http.Request) { var closed bool h.quitOnce.Do(func() { closed = true @@ -774,7 +774,7 @@ func (h *Handler) quit(w http.ResponseWriter, r *http.Request) { } } -func (h *Handler) reload(w http.ResponseWriter, r *http.Request) { +func (h *Handler) reload(w http.ResponseWriter, _ *http.Request) { rc := make(chan error) h.reloadCh <- rc if err := <-rc; err != nil { From 5b53aa11083233963bd477d19134492355d9ed0f Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 12 Apr 2023 16:14:31 +0200 Subject: [PATCH 088/632] style: Replace `else if` cascades with `switch` Wiser coders than myself have come to the conclusion that a `switch` statement is almost always superior to a statement that includes any `else if`. The exceptions that I have found in our codebase are just these two: * The `if else` is followed by an additional statement before the next condition (separated by a `;`). * The whole thing is within a `for` loop and `break` statements are used. In this case, using `switch` would require tagging the `for` loop, which probably tips the balance. Why are `switch` statements more readable? For one, fewer curly braces. But more importantly, the conditions all have the same alignment, so the whole thing follows the natural flow of going down a list of conditions. 
With `else if`, in contrast, all conditions but the first are "hidden" behind `} else if `, harder to spot and (for no good reason) presented differently from the first condition. I'm sure the aforementioned wise coders can list even more reasons. In any case, I like it so much that I have found myself recommending it in code reviews. I would like to make it a habit in our code base, without making it a hard requirement that we would test on the CI. But for that, there has to be a role model, so this commit eliminates all `if else` occurrences, unless it is autogenerated code or fits one of the exceptions above. Signed-off-by: beorn7 --- cmd/prometheus/main_unix_test.go | 6 +- cmd/promtool/tsdb.go | 5 +- discovery/dns/dns.go | 7 ++- discovery/kubernetes/kubernetes.go | 7 ++- discovery/linode/linode.go | 7 ++- discovery/marathon/marathon.go | 12 ++-- .../remote_storage_adapter/influxdb/client.go | 7 ++- model/histogram/float_histogram.go | 5 +- model/labels/labels.go | 14 +++-- promql/engine.go | 56 +++++++++++-------- promql/functions.go | 8 ++- promql/parser/ast.go | 9 +-- promql/parser/lex.go | 19 ++++--- promql/parser/parse.go | 28 +++++----- promql/parser/printer.go | 36 +++++++----- promql/quantile.go | 10 ++-- rules/alerting_test.go | 6 +- rules/manager_test.go | 7 ++- rules/recording_test.go | 6 +- scrape/manager.go | 5 +- scrape/scrape.go | 10 ++-- scrape/scrape_test.go | 36 +++++++----- storage/fanout.go | 5 +- storage/merge.go | 7 ++- storage/remote/codec.go | 23 ++++---- storage/remote/ewma.go | 5 +- storage/remote/queue_manager.go | 10 ++-- tsdb/agent/db.go | 5 +- tsdb/chunkenc/xor.go | 7 ++- tsdb/chunks/head_chunks.go | 5 +- tsdb/db.go | 10 ++-- tsdb/head_append.go | 36 +++++++----- tsdb/head_read.go | 13 +++-- tsdb/head_test.go | 29 +++++----- tsdb/index/postings.go | 22 ++++---- tsdb/ooo_head_read.go | 11 ++-- tsdb/querier.go | 25 +++++---- tsdb/tombstones/tombstones.go | 5 +- tsdb/wal.go | 20 ++++--- tsdb/wlog/live_reader.go | 5 +- 
tsdb/wlog/watcher.go | 10 ++-- util/treecache/treecache.go | 16 +++--- web/api/v1/api.go | 8 ++- web/federate_test.go | 10 ++-- 44 files changed, 340 insertions(+), 253 deletions(-) diff --git a/cmd/prometheus/main_unix_test.go b/cmd/prometheus/main_unix_test.go index b49110ea9..7224e25d7 100644 --- a/cmd/prometheus/main_unix_test.go +++ b/cmd/prometheus/main_unix_test.go @@ -72,9 +72,11 @@ Loop: if !startedOk { t.Fatal("prometheus didn't start in the specified timeout") } - if err := prom.Process.Kill(); err == nil { + switch err := prom.Process.Kill(); { + case err == nil: t.Errorf("prometheus didn't shutdown gracefully after sending the Interrupt signal") - } else if stoppedErr != nil && stoppedErr.Error() != "signal: interrupt" { // TODO - find a better way to detect when the process didn't exit as expected! + case stoppedErr != nil && stoppedErr.Error() != "signal: interrupt": + // TODO: find a better way to detect when the process didn't exit as expected! t.Errorf("prometheus exited with an unexpected error: %v", stoppedErr) } } diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index 0e0cdb863..84aa43a9c 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -403,14 +403,15 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error) return nil, nil, err } var block tsdb.BlockReader - if blockID != "" { + switch { + case blockID != "": for _, b := range blocks { if b.Meta().ULID.String() == blockID { block = b break } } - } else if len(blocks) > 0 { + case len(blocks) > 0: block = blocks[len(blocks)-1] } if block == nil { diff --git a/discovery/dns/dns.go b/discovery/dns/dns.go index 2b11c242a..96e07254f 100644 --- a/discovery/dns/dns.go +++ b/discovery/dns/dns.go @@ -285,21 +285,22 @@ func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Ms for _, lname := range conf.NameList(name) { response, err := lookupFromAnyServer(lname, qtype, conf, logger) - if err != nil { + switch { + case err != nil: 
// We can't go home yet, because a later name // may give us a valid, successful answer. However // we can no longer say "this name definitely doesn't // exist", because we did not get that answer for // at least one name. allResponsesValid = false - } else if response.Rcode == dns.RcodeSuccess { + case response.Rcode == dns.RcodeSuccess: // Outcome 1: GOLD! return response, nil } } if allResponsesValid { - // Outcome 2: everyone says NXDOMAIN, that's good enough for me + // Outcome 2: everyone says NXDOMAIN, that's good enough for me. return &dns.Msg{}, nil } // Outcome 3: boned. diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go index 0f03e2cdb..a44bd513c 100644 --- a/discovery/kubernetes/kubernetes.go +++ b/discovery/kubernetes/kubernetes.go @@ -299,12 +299,13 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) { err error ownNamespace string ) - if conf.KubeConfig != "" { + switch { + case conf.KubeConfig != "": kcfg, err = clientcmd.BuildConfigFromFlags("", conf.KubeConfig) if err != nil { return nil, err } - } else if conf.APIServer.URL == nil { + case conf.APIServer.URL == nil: // Use the Kubernetes provided pod service account // as described in https://kubernetes.io/docs/admin/service-accounts-admin/ kcfg, err = rest.InClusterConfig() @@ -324,7 +325,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) { } level.Info(l).Log("msg", "Using pod service account via in-cluster config") - } else { + default: rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd") if err != nil { return nil, err diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go index 0fd0a2c37..449e13cd8 100644 --- a/discovery/linode/linode.go +++ b/discovery/linode/linode.go @@ -250,19 +250,20 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro continue } - if detailedIP.Public && publicIPv4 == "" { + switch { + case detailedIP.Public && publicIPv4 == "": 
publicIPv4 = detailedIP.Address if detailedIP.RDNS != "" && detailedIP.RDNS != "null" { publicIPv4RDNS = detailedIP.RDNS } - } else if !detailedIP.Public && privateIPv4 == "" { + case !detailedIP.Public && privateIPv4 == "": privateIPv4 = detailedIP.Address if detailedIP.RDNS != "" && detailedIP.RDNS != "null" { privateIPv4RDNS = detailedIP.RDNS } - } else { + default: extraIPs = append(extraIPs, detailedIP.Address) } } diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go index 079f93ad0..cfd3e2c08 100644 --- a/discovery/marathon/marathon.go +++ b/discovery/marathon/marathon.go @@ -136,9 +136,10 @@ func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) { return nil, err } - if len(conf.AuthToken) > 0 { + switch { + case len(conf.AuthToken) > 0: rt, err = newAuthTokenRoundTripper(conf.AuthToken, rt) - } else if len(conf.AuthTokenFile) > 0 { + case len(conf.AuthTokenFile) > 0: rt, err = newAuthTokenFileRoundTripper(conf.AuthTokenFile, rt) } if err != nil { @@ -400,19 +401,20 @@ func targetsForApp(app *app) []model.LabelSet { var labels []map[string]string var prefix string - if len(app.Container.PortMappings) != 0 { + switch { + case len(app.Container.PortMappings) != 0: // In Marathon 1.5.x the "container.docker.portMappings" object was moved // to "container.portMappings". ports, labels = extractPortMapping(app.Container.PortMappings, app.isContainerNet()) prefix = portMappingLabelPrefix - } else if len(app.Container.Docker.PortMappings) != 0 { + case len(app.Container.Docker.PortMappings) != 0: // Prior to Marathon 1.5 the port mappings could be found at the path // "container.docker.portMappings". 
ports, labels = extractPortMapping(app.Container.Docker.PortMappings, app.isContainerNet()) prefix = portMappingLabelPrefix - } else if len(app.PortDefinitions) != 0 { + case len(app.PortDefinitions) != 0: // PortDefinitions deprecates the "ports" array and can be used to specify // a list of ports with metadata in case a mapping is not required. ports = make([]uint32, len(app.PortDefinitions)) diff --git a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go index fffbc9c2a..959656aa8 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go @@ -290,13 +290,14 @@ func mergeSamples(a, b []prompb.Sample) []prompb.Sample { result := make([]prompb.Sample, 0, len(a)+len(b)) i, j := 0, 0 for i < len(a) && j < len(b) { - if a[i].Timestamp < b[j].Timestamp { + switch { + case a[i].Timestamp < b[j].Timestamp: result = append(result, a[i]) i++ - } else if a[i].Timestamp > b[j].Timestamp { + case a[i].Timestamp > b[j].Timestamp: result = append(result, b[j]) j++ - } else { + default: result = append(result, a[i]) i++ j++ diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index cd73083bb..f95f0051c 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -824,10 +824,11 @@ mergeLoop: // Merge together all buckets from the original schema that fall into origIdx += span.Offset } currIdx := i.targetIdx(origIdx) - if firstPass { + switch { + case firstPass: i.currIdx = currIdx firstPass = false - } else if currIdx != i.currIdx { + case currIdx != i.currIdx: // Reached next bucket in targetSchema. // Do not actually forward to the next bucket, but break out. 
break mergeLoop diff --git a/model/labels/labels.go b/model/labels/labels.go index b7398d17f..93524ddcf 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -169,11 +169,12 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { b = b[:0] i, j := 0, 0 for i < len(ls) && j < len(names) { - if names[j] < ls[i].Name { + switch { + case names[j] < ls[i].Name: j++ - } else if ls[i].Name < names[j] { + case ls[i].Name < names[j]: i++ - } else { + default: b = append(b, ls[i].Name...) b = append(b, seps[0]) b = append(b, ls[i].Value...) @@ -213,11 +214,12 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { b.WriteByte(labelSep) i, j := 0, 0 for i < len(ls) && j < len(names) { - if names[j] < ls[i].Name { + switch { + case names[j] < ls[i].Name: j++ - } else if ls[i].Name < names[j] { + case ls[i].Name < names[j]: i++ - } else { + default: if b.Len() > 1 { b.WriteByte(seps[0]) } diff --git a/promql/engine.go b/promql/engine.go index 21b894b93..1f493f129 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -400,7 +400,7 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) { } // NewInstantQuery returns an evaluation query for the given expression at the given time. -func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) { +func (ng *Engine) NewInstantQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) { expr, err := parser.ParseExpr(qs) if err != nil { return nil, err @@ -416,7 +416,7 @@ func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts // NewRangeQuery returns an evaluation query for the given time range and with // the resolution set by the interval. 
-func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { +func (ng *Engine) NewRangeQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { expr, err := parser.ParseExpr(qs) if err != nil { return nil, err @@ -1979,7 +1979,7 @@ func (ev *evaluator) matrixIterSlice( // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; histograms[drop].T < mint; drop++ { + for drop = 0; histograms[drop].T < mint; drop++ { // nolint:revive } ev.currentSamples -= drop copy(histograms, histograms[drop:]) @@ -2096,13 +2096,13 @@ func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching, } func (ev *evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector { - if matching.Card != parser.CardManyToMany { + switch { + case matching.Card != parser.CardManyToMany: panic("set operations must only use many-to-many matching") - } - if len(lhs) == 0 { // Short-circuit. + case len(lhs) == 0: // Short-circuit. enh.Out = append(enh.Out, rhs...) return enh.Out - } else if len(rhs) == 0 { + case len(rhs) == 0: enh.Out = append(enh.Out, lhs...) 
return enh.Out } @@ -2221,13 +2221,14 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * hl, hr = hr, hl } floatValue, histogramValue, keep := vectorElemBinop(op, fl, fr, hl, hr) - if returnBool { + switch { + case returnBool: if keep { floatValue = 1.0 } else { floatValue = 0.0 } - } else if !keep { + case !keep: continue } metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh) @@ -2514,14 +2515,15 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without if !ok { var m labels.Labels enh.resetBuilder(metric) - if without { + switch { + case without: enh.lb.Del(grouping...) enh.lb.Del(labels.MetricName) m = enh.lb.Labels() - } else if len(grouping) > 0 { + case len(grouping) > 0: enh.lb.Keep(grouping...) m = enh.lb.Labels() - } else { + default: m = labels.EmptyLabels() } newAgg := &groupedAggregation{ @@ -2530,9 +2532,10 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without mean: s.F, groupCount: 1, } - if s.H == nil { + switch { + case s.H == nil: newAgg.hasFloat = true - } else if op == parser.SUM { + case op == parser.SUM: newAgg.histogramValue = s.H.Copy() newAgg.hasHistogram = true } @@ -2542,9 +2545,10 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without inputVecLen := int64(len(vec)) resultSize := k - if k > inputVecLen { + switch { + case k > inputVecLen: resultSize = inputVecLen - } else if k == 0 { + case k == 0: resultSize = 1 } switch op { @@ -2637,12 +2641,13 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without case parser.TOPK: // We build a heap of up to k elements, with the smallest element at heap[0]. 
- if int64(len(group.heap)) < k { + switch { + case int64(len(group.heap)) < k: heap.Push(&group.heap, &Sample{ F: s.F, Metric: s.Metric, }) - } else if group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)) { + case group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)): // This new element is bigger than the previous smallest element - overwrite that. group.heap[0] = Sample{ F: s.F, @@ -2655,12 +2660,13 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without case parser.BOTTOMK: // We build a heap of up to k elements, with the biggest element at heap[0]. - if int64(len(group.reverseHeap)) < k { + switch { + case int64(len(group.reverseHeap)) < k: heap.Push(&group.reverseHeap, &Sample{ F: s.F, Metric: s.Metric, }) - } else if group.reverseHeap[0].F > s.F || (math.IsNaN(group.reverseHeap[0].F) && !math.IsNaN(s.F)) { + case group.reverseHeap[0].F > s.F || (math.IsNaN(group.reverseHeap[0].F) && !math.IsNaN(s.F)): // This new element is smaller than the previous biggest element - overwrite that. 
group.reverseHeap[0] = Sample{ F: s.F, @@ -2819,9 +2825,10 @@ func PreprocessExpr(expr parser.Expr, start, end time.Time) parser.Expr { func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool { switch n := expr.(type) { case *parser.VectorSelector: - if n.StartOrEnd == parser.START { + switch n.StartOrEnd { + case parser.START: n.Timestamp = makeInt64Pointer(timestamp.FromTime(start)) - } else if n.StartOrEnd == parser.END { + case parser.END: n.Timestamp = makeInt64Pointer(timestamp.FromTime(end)) } return n.Timestamp != nil @@ -2878,9 +2885,10 @@ func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool { if isInvariant { n.Expr = newStepInvariantExpr(n.Expr) } - if n.StartOrEnd == parser.START { + switch n.StartOrEnd { + case parser.START: n.Timestamp = makeInt64Pointer(timestamp.FromTime(start)) - } else if n.StartOrEnd == parser.END { + case parser.END: n.Timestamp = makeInt64Pointer(timestamp.FromTime(end)) } return n.Timestamp != nil diff --git a/promql/functions.go b/promql/functions.go index 0c22cb44c..2983de4fd 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -804,12 +804,14 @@ func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) V // === sgn(Vector parser.ValueTypeVector) Vector === func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { return simpleFunc(vals, enh, func(v float64) float64 { - if v < 0 { + switch { + case v < 0: return -1 - } else if v > 0 { + case v > 0: return 1 + default: + return v } - return v }) } diff --git a/promql/parser/ast.go b/promql/parser/ast.go index 190af2d59..f156fc602 100644 --- a/promql/parser/ast.go +++ b/promql/parser/ast.go @@ -368,13 +368,14 @@ func Children(node Node) []Node { case *AggregateExpr: // While this does not look nice, it should avoid unnecessary allocations // caused by slice resizing - if n.Expr == nil && n.Param == nil { + switch { + case n.Expr == nil && n.Param == nil: return nil - } else if n.Expr 
== nil { + case n.Expr == nil: return []Node{n.Param} - } else if n.Param == nil { + case n.Param == nil: return []Node{n.Expr} - } else { + default: return []Node{n.Expr, n.Param} } case *BinaryExpr: diff --git a/promql/parser/lex.go b/promql/parser/lex.go index 657dc2809..fe5a8abfe 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -347,9 +347,10 @@ func lexStatements(l *Lexer) stateFn { switch r := l.next(); { case r == eof: - if l.parenDepth != 0 { + switch { + case l.parenDepth != 0: return l.errorf("unclosed left parenthesis") - } else if l.bracketOpen { + case l.bracketOpen: return l.errorf("unclosed left bracket") } l.emit(EOF) @@ -371,12 +372,13 @@ func lexStatements(l *Lexer) stateFn { case r == '^': l.emit(POW) case r == '=': - if t := l.peek(); t == '=' { + switch t := l.peek(); t { + case '=': l.next() l.emit(EQLC) - } else if t == '~' { + case '~': return l.errorf("unexpected character after '=': %q", t) - } else { + default: l.emit(EQL) } case r == '!': @@ -791,11 +793,12 @@ Loop: default: l.backup() word := l.input[l.start:l.pos] - if kw, ok := key[strings.ToLower(word)]; ok { + switch kw, ok := key[strings.ToLower(word)]; { + case ok: l.emit(kw) - } else if !strings.Contains(word, ":") { + case !strings.Contains(word, ":"): l.emit(IDENTIFIER) - } else { + default: l.emit(METRIC_IDENTIFIER) } break Loop diff --git a/promql/parser/parse.go b/promql/parser/parse.go index 3c32f3c05..e69ed4595 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -270,14 +270,15 @@ var errUnexpected = errors.New("unexpected error") // recover is the handler that turns panics into returns from the top level of Parse. func (p *parser) recover(errp *error) { e := recover() - if _, ok := e.(runtime.Error); ok { + switch _, ok := e.(runtime.Error); { + case ok: // Print the stack trace but do not inhibit the running application. 
buf := make([]byte, 64<<10) buf = buf[:runtime.Stack(buf, false)] fmt.Fprintf(os.Stderr, "parser panic: %v\n%s", e, buf) *errp = errUnexpected - } else if e != nil { + case e != nil: *errp = e.(error) } } @@ -518,20 +519,18 @@ func (p *parser) checkAST(node Node) (typ ValueType) { p.addParseErrf(n.RHS.PositionRange(), "binary expression must contain only scalar and instant vector types") } - if (lt != ValueTypeVector || rt != ValueTypeVector) && n.VectorMatching != nil { + switch { + case (lt != ValueTypeVector || rt != ValueTypeVector) && n.VectorMatching != nil: if len(n.VectorMatching.MatchingLabels) > 0 { p.addParseErrf(n.PositionRange(), "vector matching only allowed between instant vectors") } n.VectorMatching = nil - } else { - // Both operands are Vectors. - if n.Op.IsSetOperator() { - if n.VectorMatching.Card == CardOneToMany || n.VectorMatching.Card == CardManyToOne { - p.addParseErrf(n.PositionRange(), "no grouping allowed for %q operation", n.Op) - } - if n.VectorMatching.Card != CardManyToMany { - p.addParseErrf(n.PositionRange(), "set operations must always be many-to-many") - } + case n.Op.IsSetOperator(): // Both operands are Vectors. 
+ if n.VectorMatching.Card == CardOneToMany || n.VectorMatching.Card == CardManyToOne { + p.addParseErrf(n.PositionRange(), "no grouping allowed for %q operation", n.Op) + } + if n.VectorMatching.Card != CardManyToMany { + p.addParseErrf(n.PositionRange(), "set operations must always be many-to-many") } } @@ -708,9 +707,10 @@ func (p *parser) addOffset(e Node, offset time.Duration) { } // it is already ensured by parseDuration func that there never will be a zero offset modifier - if *orgoffsetp != 0 { + switch { + case *orgoffsetp != 0: p.addParseErrf(e.PositionRange(), "offset may not be set multiple times") - } else if orgoffsetp != nil { + case orgoffsetp != nil: *orgoffsetp = offset } diff --git a/promql/parser/printer.go b/promql/parser/printer.go index 1f15eeef3..ff171f215 100644 --- a/promql/parser/printer.go +++ b/promql/parser/printer.go @@ -124,17 +124,19 @@ func (node *MatrixSelector) String() string { // Copy the Vector selector before changing the offset vecSelector := *node.VectorSelector.(*VectorSelector) offset := "" - if vecSelector.OriginalOffset > time.Duration(0) { + switch { + case vecSelector.OriginalOffset > time.Duration(0): offset = fmt.Sprintf(" offset %s", model.Duration(vecSelector.OriginalOffset)) - } else if vecSelector.OriginalOffset < time.Duration(0) { + case vecSelector.OriginalOffset < time.Duration(0): offset = fmt.Sprintf(" offset -%s", model.Duration(-vecSelector.OriginalOffset)) } at := "" - if vecSelector.Timestamp != nil { + switch { + case vecSelector.Timestamp != nil: at = fmt.Sprintf(" @ %.3f", float64(*vecSelector.Timestamp)/1000.0) - } else if vecSelector.StartOrEnd == START { + case vecSelector.StartOrEnd == START: at = " @ start()" - } else if vecSelector.StartOrEnd == END { + case vecSelector.StartOrEnd == END: at = " @ end()" } @@ -162,17 +164,19 @@ func (node *SubqueryExpr) getSubqueryTimeSuffix() string { step = model.Duration(node.Step).String() } offset := "" - if node.OriginalOffset > time.Duration(0) { + 
switch { + case node.OriginalOffset > time.Duration(0): offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset)) - } else if node.OriginalOffset < time.Duration(0) { + case node.OriginalOffset < time.Duration(0): offset = fmt.Sprintf(" offset -%s", model.Duration(-node.OriginalOffset)) } at := "" - if node.Timestamp != nil { + switch { + case node.Timestamp != nil: at = fmt.Sprintf(" @ %.3f", float64(*node.Timestamp)/1000.0) - } else if node.StartOrEnd == START { + case node.StartOrEnd == START: at = " @ start()" - } else if node.StartOrEnd == END { + case node.StartOrEnd == END: at = " @ end()" } return fmt.Sprintf("[%s:%s]%s%s", model.Duration(node.Range), step, at, offset) @@ -207,17 +211,19 @@ func (node *VectorSelector) String() string { labelStrings = append(labelStrings, matcher.String()) } offset := "" - if node.OriginalOffset > time.Duration(0) { + switch { + case node.OriginalOffset > time.Duration(0): offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset)) - } else if node.OriginalOffset < time.Duration(0) { + case node.OriginalOffset < time.Duration(0): offset = fmt.Sprintf(" offset -%s", model.Duration(-node.OriginalOffset)) } at := "" - if node.Timestamp != nil { + switch { + case node.Timestamp != nil: at = fmt.Sprintf(" @ %.3f", float64(*node.Timestamp)/1000.0) - } else if node.StartOrEnd == START { + case node.StartOrEnd == START: at = " @ start()" - } else if node.StartOrEnd == END { + case node.StartOrEnd == END: at = " @ end()" } diff --git a/promql/quantile.go b/promql/quantile.go index aaead671c..78d0bbaf0 100644 --- a/promql/quantile.go +++ b/promql/quantile.go @@ -169,11 +169,12 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { } } if bucket.Lower < 0 && bucket.Upper > 0 { - if len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0 { + switch { + case len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0: // The result is in the zero bucket and the histogram has only // positive 
buckets. So we consider 0 to be the lower bound. bucket.Lower = 0 - } else if len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0 { + case len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0: // The result is in the zero bucket and the histogram has only // negative buckets. So we consider 0 to be the upper bound. bucket.Upper = 0 @@ -244,12 +245,13 @@ func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6 for it.Next() { b := it.At() if b.Lower < 0 && b.Upper > 0 { - if len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0 { + switch { + case len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0: // This is the zero bucket and the histogram has only // positive buckets. So we consider 0 to be the lower // bound. b.Lower = 0 - } else if len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0 { + case len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0: // This is in the zero bucket and the histogram has only // negative buckets. So we consider 0 to be the upper // bound. 
diff --git a/rules/alerting_test.go b/rules/alerting_test.go index 13aab9804..8cd0da281 100644 --- a/rules/alerting_test.go +++ b/rules/alerting_test.go @@ -587,10 +587,10 @@ func TestAlertingRuleLimit(t *testing.T) { evalTime := time.Unix(0, 0) for _, test := range tests { - _, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit) - if err != nil { + switch _, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit); { + case err != nil: require.EqualError(t, err, test.err) - } else if test.err != "" { + case test.err != "": t.Errorf("Expected errror %s, got none", test.err) } } diff --git a/rules/manager_test.go b/rules/manager_test.go index 16bb080f5..26a790964 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -481,17 +481,18 @@ func TestForStateRestore(t *testing.T) { }) // Checking if we have restored it correctly. - if tst.noRestore { + switch { + case tst.noRestore: require.Equal(t, tst.num, len(got)) for _, e := range got { require.Equal(t, e.ActiveAt, restoreTime) } - } else if tst.gracePeriod { + case tst.gracePeriod: require.Equal(t, tst.num, len(got)) for _, e := range got { require.Equal(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime)) } - } else { + default: exp := tst.alerts require.Equal(t, len(exp), len(got)) sortAlerts(exp) diff --git a/rules/recording_test.go b/rules/recording_test.go index 61f47e048..35a0b1a0b 100644 --- a/rules/recording_test.go +++ b/rules/recording_test.go @@ -223,10 +223,10 @@ func TestRecordingRuleLimit(t *testing.T) { evalTime := time.Unix(0, 0) for _, test := range tests { - _, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit) - if err != nil { + switch _, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit); { + case err != nil: 
require.EqualError(t, err, test.err) - } else if test.err != "" { + case test.err != "": t.Errorf("Expected error %s, got none", test.err) } } diff --git a/scrape/manager.go b/scrape/manager.go index 69a0eaa1f..d75fe30cf 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -288,10 +288,11 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { // Cleanup and reload pool if the configuration has changed. var failed bool for name, sp := range m.scrapePools { - if cfg, ok := m.scrapeConfigs[name]; !ok { + switch cfg, ok := m.scrapeConfigs[name]; { + case !ok: sp.stop() delete(m.scrapePools, name) - } else if !reflect.DeepEqual(sp.config, cfg) { + case !reflect.DeepEqual(sp.config, cfg): err := sp.reload(cfg) if err != nil { level.Error(m.logger).Log("msg", "error reloading scrape pool", "err", err, "scrape_pool", name) diff --git a/scrape/scrape.go b/scrape/scrape.go index f38527ff3..680585af8 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -503,9 +503,10 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { // Replicate .Labels().IsEmpty() with a loop here to avoid generating garbage. nonEmpty := false t.LabelsRange(func(l labels.Label) { nonEmpty = true }) - if nonEmpty { + switch { + case nonEmpty: all = append(all, t) - } else if !t.discoveredLabels.IsEmpty() { + case !t.discoveredLabels.IsEmpty(): sp.droppedTargets = append(sp.droppedTargets, t) } } @@ -946,9 +947,10 @@ func (c *scrapeCache) iterDone(flushCache bool) { count := len(c.series) + len(c.droppedSeries) + len(c.metadata) c.metaMtx.Unlock() - if flushCache { + switch { + case flushCache: c.successfulCount = count - } else if count > c.successfulCount*2+1000 { + case count > c.successfulCount*2+1000: // If a target had varying labels in scrapes that ultimately failed, // the caches would grow indefinitely. Force a flush when this happens. 
// We use the heuristic that this is a doubling of the cache size diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index a8028b652..6b4b2d5f5 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -724,9 +724,10 @@ func TestScrapeLoopStop(t *testing.T) { // All samples in a scrape must have the same timestamp. var ts int64 for i, s := range appender.result { - if i%6 == 0 { + switch { + case i%6 == 0: ts = s.t - } else if s.t != ts { + case s.t != ts: t.Fatalf("Unexpected multiple timestamps within single scrape") } } @@ -1139,10 +1140,11 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) { scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { numScrapes++ - if numScrapes == 1 { + switch numScrapes { + case 1: w.Write([]byte("metric_a 42\n")) return nil - } else if numScrapes == 5 { + case 5: cancel() } return errors.New("scrape failed") @@ -1200,13 +1202,14 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) { scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { numScrapes++ - if numScrapes == 1 { + switch numScrapes { + case 1: w.Write([]byte("metric_a 42\n")) return nil - } else if numScrapes == 2 { + case 2: w.Write([]byte("7&-\n")) return nil - } else if numScrapes == 3 { + case 3: cancel() } return errors.New("scrape failed") @@ -1265,14 +1268,15 @@ func TestScrapeLoopCache(t *testing.T) { numScrapes := 0 scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { - if numScrapes == 1 || numScrapes == 2 { + switch numScrapes { + case 1, 2: if _, ok := sl.cache.series["metric_a"]; !ok { t.Errorf("metric_a missing from cache after scrape %d", numScrapes) } if _, ok := sl.cache.series["metric_b"]; !ok { t.Errorf("metric_b missing from cache after scrape %d", numScrapes) } - } else if numScrapes == 3 { + case 3: if _, ok := sl.cache.series["metric_a"]; !ok { t.Errorf("metric_a missing from cache after scrape %d", numScrapes) } @@ -1283,13 +1287,14 @@ func 
TestScrapeLoopCache(t *testing.T) { numScrapes++ - if numScrapes == 1 { + switch numScrapes { + case 1: w.Write([]byte("metric_a 42\nmetric_b 43\n")) return nil - } else if numScrapes == 3 { + case 3: w.Write([]byte("metric_a 44\n")) return nil - } else if numScrapes == 4 { + case 4: cancel() } return fmt.Errorf("scrape failed") @@ -2280,11 +2285,12 @@ func TestTargetScrapeScrapeCancel(t *testing.T) { go func() { _, err := ts.scrape(ctx, io.Discard) - if err == nil { + switch { + case err == nil: errc <- errors.New("Expected error but got nil") - } else if ctx.Err() != context.Canceled { + case ctx.Err() != context.Canceled: errc <- errors.Errorf("Expected context cancellation error but got: %s", ctx.Err()) - } else { + default: close(errc) } }() diff --git a/storage/fanout.go b/storage/fanout.go index 4f995afba..a9db4f628 100644 --- a/storage/fanout.go +++ b/storage/fanout.go @@ -222,9 +222,10 @@ func (f *fanoutAppender) Rollback() (err error) { for _, appender := range f.secondaries { rollbackErr := appender.Rollback() - if err == nil { + switch { + case err == nil: err = rollbackErr - } else if rollbackErr != nil { + case rollbackErr != nil: level.Error(f.logger).Log("msg", "Squashed rollback error on rollback", "err", rollbackErr) } } diff --git a/storage/merge.go b/storage/merge.go index 193a02522..c0665d720 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -197,13 +197,14 @@ func mergeStrings(a, b []string) []string { res := make([]string, 0, maxl*10/9) for len(a) > 0 && len(b) > 0 { - if a[0] == b[0] { + switch { + case a[0] == b[0]: res = append(res, a[0]) a, b = a[1:], b[1:] - } else if a[0] < b[0] { + case a[0] < b[0]: res = append(res, a[0]) a = a[1:] - } else { + default: res = append(res, b[0]) b = b[1:] } diff --git a/storage/remote/codec.go b/storage/remote/codec.go index bfbd08d24..2ceed4de1 100644 --- a/storage/remote/codec.go +++ b/storage/remote/codec.go @@ -291,13 +291,14 @@ func MergeLabels(primary, secondary []prompb.Label) 
[]prompb.Label { result := make([]prompb.Label, 0, len(primary)+len(secondary)) i, j := 0, 0 for i < len(primary) && j < len(secondary) { - if primary[i].Name < secondary[j].Name { + switch { + case primary[i].Name < secondary[j].Name: result = append(result, primary[i]) i++ - } else if primary[i].Name > secondary[j].Name { + case primary[i].Name > secondary[j].Name: result = append(result, secondary[j]) j++ - } else { + default: result = append(result, primary[i]) i++ j++ @@ -429,7 +430,8 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType { return c.series.histograms[n+c.histogramsCur].Timestamp >= t }) - if c.floatsCur < len(c.series.floats) && c.histogramsCur < len(c.series.histograms) { + switch { + case c.floatsCur < len(c.series.floats) && c.histogramsCur < len(c.series.histograms): // If float samples and histogram samples have overlapping timestamps prefer the float samples. if c.series.floats[c.floatsCur].Timestamp <= c.series.histograms[c.histogramsCur].Timestamp { c.curValType = chunkenc.ValFloat @@ -445,9 +447,9 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType { c.floatsCur-- } } - } else if c.floatsCur < len(c.series.floats) { + case c.floatsCur < len(c.series.floats): c.curValType = chunkenc.ValFloat - } else if c.histogramsCur < len(c.series.histograms) { + case c.histogramsCur < len(c.series.histograms): c.curValType = getHistogramValType(&c.series.histograms[c.histogramsCur]) } @@ -515,18 +517,19 @@ func (c *concreteSeriesIterator) Next() chunkenc.ValueType { } c.curValType = chunkenc.ValNone - if peekFloatTS < peekHistTS { + switch { + case peekFloatTS < peekHistTS: c.floatsCur++ c.curValType = chunkenc.ValFloat - } else if peekHistTS < peekFloatTS { + case peekHistTS < peekFloatTS: c.histogramsCur++ c.curValType = chunkenc.ValHistogram - } else if peekFloatTS == noTS && peekHistTS == noTS { + case peekFloatTS == noTS && peekHistTS == noTS: // This only happens when the iterator is exhausted; we set the 
cursors off the end to prevent // Seek() from returning anything afterwards. c.floatsCur = len(c.series.floats) c.histogramsCur = len(c.series.histograms) - } else { + default: // Prefer float samples to histogram samples if there's a conflict. We advance the cursor for histograms // anyway otherwise the histogram sample will get selected on the next call to Next(). c.floatsCur++ diff --git a/storage/remote/ewma.go b/storage/remote/ewma.go index c7fb0289b..ea4472c49 100644 --- a/storage/remote/ewma.go +++ b/storage/remote/ewma.go @@ -55,9 +55,10 @@ func (r *ewmaRate) tick() { r.mutex.Lock() defer r.mutex.Unlock() - if r.init { + switch { + case r.init: r.lastRate += r.alpha * (instantRate - r.lastRate) - } else if newEvents > 0 { + case newEvents > 0: r.init = true r.lastRate = instantRate } diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 62bd17a66..10fb6d153 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -1030,9 +1030,10 @@ func (t *QueueManager) calculateDesiredShards() int { return t.numShards } - if numShards > t.cfg.MaxShards { + switch { + case numShards > t.cfg.MaxShards: numShards = t.cfg.MaxShards - } else if numShards < t.cfg.MinShards { + case numShards < t.cfg.MinShards: numShards = t.cfg.MinShards } return numShards @@ -1575,10 +1576,11 @@ func sendWriteRequestWithBackoff(ctx context.Context, cfg config.QueueConfig, l } sleepDuration = backoff - if backoffErr.retryAfter > 0 { + switch { + case backoffErr.retryAfter > 0: sleepDuration = backoffErr.retryAfter level.Info(l).Log("msg", "Retrying after duration specified by Retry-After header", "duration", sleepDuration) - } else if backoffErr.retryAfter < 0 { + case backoffErr.retryAfter < 0: level.Debug(l).Log("msg", "retry-after cannot be in past, retrying using default backoff mechanism") } diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index cb075f306..3343ee18e 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -951,7 
+951,8 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int return 0, storage.ErrOutOfOrderSample } - if h != nil { + switch { + case h != nil: // NOTE: always modify pendingHistograms and histogramSeries together a.pendingHistograms = append(a.pendingHistograms, record.RefHistogramSample{ Ref: series.ref, @@ -959,7 +960,7 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int H: h, }) a.histogramSeries = append(a.histogramSeries, series) - } else if fh != nil { + case fh != nil: // NOTE: always modify pendingFloatHistograms and floatHistogramSeries together a.pendingFloatHistograms = append(a.pendingFloatHistograms, record.RefFloatHistogramSample{ Ref: series.ref, diff --git a/tsdb/chunkenc/xor.go b/tsdb/chunkenc/xor.go index ba2d96d36..133ed9952 100644 --- a/tsdb/chunkenc/xor.go +++ b/tsdb/chunkenc/xor.go @@ -164,14 +164,15 @@ func (a *xorAppender) Append(t int64, v float64) { var tDelta uint64 num := binary.BigEndian.Uint16(a.b.bytes()) - if num == 0 { + switch num { + case 0: buf := make([]byte, binary.MaxVarintLen64) for _, b := range buf[:binary.PutVarint(buf, t)] { a.b.writeByte(b) } a.b.writeBits(math.Float64bits(v), 64) - } else if num == 1 { + case 1: tDelta = uint64(t - a.t) buf := make([]byte, binary.MaxVarintLen64) @@ -181,7 +182,7 @@ func (a *xorAppender) Append(t int64, v float64) { a.writeVDelta(v) - } else { + default: tDelta = uint64(t - a.t) dod := int64(tDelta - a.tDelta) diff --git a/tsdb/chunks/head_chunks.go b/tsdb/chunks/head_chunks.go index a7ff90475..bcdab2125 100644 --- a/tsdb/chunks/head_chunks.go +++ b/tsdb/chunks/head_chunks.go @@ -999,9 +999,10 @@ func (cdm *ChunkDiskMapper) DeleteCorrupted(originalErr error) error { cdm.readPathMtx.RLock() lastSeq := 0 for seg := range cdm.mmappedChunkFiles { - if seg >= cerr.FileIndex { + switch { + case seg >= cerr.FileIndex: segs = append(segs, seg) - } else if seg > lastSeq { + case seg > lastSeq: lastSeq = seg } } diff --git 
a/tsdb/db.go b/tsdb/db.go index 659251c3c..45c0771b1 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -963,10 +963,11 @@ func (db *DB) ApplyConfig(conf *config.Config) error { // Create WBL if it was not present and if OOO is enabled with WAL enabled. var wblog *wlog.WL var err error - if db.head.wbl != nil { + switch { + case db.head.wbl != nil: // The existing WBL from the disk might have been replayed while OOO was disabled. wblog = db.head.wbl - } else if !db.oooWasEnabled.Load() && oooTimeWindow > 0 && db.opts.WALSegmentSize >= 0 { + case !db.oooWasEnabled.Load() && oooTimeWindow > 0 && db.opts.WALSegmentSize >= 0: segmentSize := wlog.DefaultSegmentSize // Wal is set to a custom size. if db.opts.WALSegmentSize > 0 { @@ -1532,10 +1533,11 @@ func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error { } toDelete := filepath.Join(db.dir, ulid.String()) - if _, err := os.Stat(toDelete); os.IsNotExist(err) { + switch _, err := os.Stat(toDelete); { + case os.IsNotExist(err): // Noop. continue - } else if err != nil { + case err != nil: return errors.Wrapf(err, "stat dir %v", toDelete) } diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 6bc91ae06..86cb09751 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -344,9 +344,10 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 } if value.IsStaleNaN(v) { - if s.lastHistogramValue != nil { + switch { + case s.lastHistogramValue != nil: return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v}, nil) - } else if s.lastFloatHistogramValue != nil { + case s.lastFloatHistogramValue != nil: return a.AppendHistogram(ref, lset, t, nil, &histogram.FloatHistogram{Sum: v}) } } @@ -552,9 +553,10 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels return 0, err } if created { - if h != nil { + switch { + case h != nil: s.lastHistogramValue = &histogram.Histogram{} - } else if fh != nil { + case fh != nil: s.lastFloatHistogramValue = 
&histogram.FloatHistogram{} } a.series = append(a.series, record.RefSeries{ @@ -564,7 +566,8 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels } } - if h != nil { + switch { + case h != nil: s.Lock() if err := s.appendableHistogram(t, h); err != nil { s.Unlock() @@ -581,7 +584,7 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels H: h, }) a.histogramSeries = append(a.histogramSeries, s) - } else if fh != nil { + case fh != nil: s.Lock() if err := s.appendableFloatHistogram(t, fh); err != nil { s.Unlock() @@ -938,7 +941,10 @@ func (a *headAppender) Commit() (err error) { var ok, chunkCreated bool - if err == nil && oooSample { + switch { + case err != nil: + // Do nothing here. + case oooSample: // Sample is OOO and OOO handling is enabled // and the delta is within the OOO tolerance. var mmapRef chunks.ChunkDiskMapperRef @@ -976,7 +982,7 @@ func (a *headAppender) Commit() (err error) { // TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305. samplesAppended-- } - } else if err == nil { + default: ok, chunkCreated = series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper, chunkRange) if ok { if s.T < inOrderMint { @@ -1177,14 +1183,15 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui app.RecodeHistogram(h, pBackwardInserts, nBackwardInserts) } // We have 3 cases here - // - !okToAppend -> We need to cut a new chunk. + // - !okToAppend or counterReset -> We need to cut a new chunk. // - okToAppend but we have inserts → Existing chunk needs // recoding before we can append our histogram. // - okToAppend and no inserts → Chunk is ready to support our histogram. 
- if !okToAppend || counterReset { + switch { + case !okToAppend || counterReset: c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange) chunkCreated = true - } else if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 { + case len(pForwardInserts) > 0 || len(nForwardInserts) > 0: // New buckets have appeared. We need to recode all // prior histogram samples within the chunk before we // can process this one. @@ -1270,14 +1277,15 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, app.RecodeHistogramm(fh, pBackwardInserts, nBackwardInserts) } // We have 3 cases here - // - !okToAppend -> We need to cut a new chunk. + // - !okToAppend or counterReset -> We need to cut a new chunk. // - okToAppend but we have inserts → Existing chunk needs // recoding before we can append our histogram. // - okToAppend and no inserts → Chunk is ready to support our histogram. - if !okToAppend || counterReset { + switch { + case !okToAppend || counterReset: c = s.cutNewHeadChunk(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange) chunkCreated = true - } else if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 { + case len(pForwardInserts) > 0 || len(nForwardInserts) > 0: // New buckets have appeared. We need to recode all // prior histogram samples within the chunk before we // can process this one. 
diff --git a/tsdb/head_read.go b/tsdb/head_read.go index 9c40bcd7a..9c546ab16 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -424,7 +424,8 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper break } - if chunkRef == meta.OOOLastRef { + switch { + case chunkRef == meta.OOOLastRef: tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{ meta: chunks.Meta{ MinTime: meta.OOOLastMinTime, @@ -435,7 +436,7 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper origMinT: c.minTime, origMaxT: c.maxTime, }) - } else if c.OverlapsClosedInterval(mint, maxt) { + case c.OverlapsClosedInterval(mint, maxt): tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{ meta: chunks.Meta{ MinTime: c.minTime, @@ -594,12 +595,14 @@ type boundedIterator struct { func (b boundedIterator) Next() chunkenc.ValueType { for b.Iterator.Next() == chunkenc.ValFloat { t, _ := b.Iterator.At() - if t < b.minT { + switch { + case t < b.minT: continue - } else if t > b.maxT { + case t > b.maxT: return chunkenc.ValNone + default: + return chunkenc.ValFloat } - return chunkenc.ValFloat } return chunkenc.ValNone } diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 39bcf4c78..a183e99c0 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -2960,10 +2960,11 @@ func TestAppendHistogram(t *testing.T) { actHistograms := make([]tsdbutil.Sample, 0, len(expHistograms)) actFloatHistograms := make([]tsdbutil.Sample, 0, len(expFloatHistograms)) for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() { - if typ == chunkenc.ValHistogram { + switch typ { + case chunkenc.ValHistogram: ts, h := it.AtHistogram() actHistograms = append(actHistograms, sample{t: ts, h: h}) - } else if typ == chunkenc.ValFloatHistogram { + case chunkenc.ValFloatHistogram: ts, fh := it.AtFloatHistogram() actFloatHistograms = append(actFloatHistograms, sample{t: ts, fh: fh}) } @@ -3565,14 +3566,15 @@ func testHistogramStaleSampleHelper(t *testing.T, 
floatHistogram bool) { for i, eh := range expHistograms { ah := actHistograms[i] if floatHistogram { - if value.IsStaleNaN(eh.fh.Sum) { + switch { + case value.IsStaleNaN(eh.fh.Sum): actNumStale++ require.True(t, value.IsStaleNaN(ah.fh.Sum)) // To make require.Equal work. ah.fh.Sum = 0 eh.fh = eh.fh.Copy() eh.fh.Sum = 0 - } else if i > 0 { + case i > 0: prev := expHistograms[i-1] if prev.fh == nil || value.IsStaleNaN(prev.fh.Sum) { eh.fh.CounterResetHint = histogram.UnknownCounterReset @@ -3580,14 +3582,15 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) { } require.Equal(t, eh, ah) } else { - if value.IsStaleNaN(eh.h.Sum) { + switch { + case value.IsStaleNaN(eh.h.Sum): actNumStale++ require.True(t, value.IsStaleNaN(ah.h.Sum)) // To make require.Equal work. ah.h.Sum = 0 eh.h = eh.h.Copy() eh.h.Sum = 0 - } else if i > 0 { + case i > 0: prev := expHistograms[i-1] if prev.h == nil || value.IsStaleNaN(prev.h.Sum) { eh.h.CounterResetHint = histogram.UnknownCounterReset @@ -4488,19 +4491,19 @@ func TestHistogramValidation(t *testing.T) { for testName, tc := range tests { t.Run(testName, func(t *testing.T) { - err := ValidateHistogram(tc.h) - if tc.errMsg != "" { + switch err := ValidateHistogram(tc.h); { + case tc.errMsg != "": require.ErrorContains(t, err, tc.errMsg) - } else { + default: require.NoError(t, err) } - err = ValidateFloatHistogram(tc.h.ToFloat()) - if tc.errMsgFloat != "" { + switch err := ValidateFloatHistogram(tc.h.ToFloat()); { + case tc.errMsgFloat != "": require.ErrorContains(t, err, tc.errMsgFloat) - } else if tc.errMsg != "" { + case tc.errMsg != "": require.ErrorContains(t, err, tc.errMsg) - } else { + default: require.NoError(t, err) } }) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 15df374fc..514775210 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -565,12 +565,11 @@ func newMergedPostings(p []Postings) (m *mergedPostings, nonEmpty bool) { for _, it := range p { // NOTE: 
mergedPostings struct requires the user to issue an initial Next. - if it.Next() { + switch { + case it.Next(): ph = append(ph, it) - } else { - if it.Err() != nil { - return &mergedPostings{err: it.Err()}, true - } + case it.Err() != nil: + return &mergedPostings{err: it.Err()}, true } } @@ -704,16 +703,16 @@ func (rp *removedPostings) Next() bool { return true } - fcur, rcur := rp.full.At(), rp.remove.At() - if fcur < rcur { + switch fcur, rcur := rp.full.At(), rp.remove.At(); { + case fcur < rcur: rp.cur = fcur rp.fok = rp.full.Next() return true - } else if rcur < fcur { + case rcur < fcur: // Forward the remove postings to the right position. rp.rok = rp.remove.Seek(fcur) - } else { + default: // Skip the current posting. rp.fok = rp.full.Next() } @@ -848,9 +847,10 @@ func (it *bigEndianPostings) Err() error { func FindIntersectingPostings(p Postings, candidates []Postings) (indexes []int, err error) { h := make(postingsWithIndexHeap, 0, len(candidates)) for idx, it := range candidates { - if it.Next() { + switch { + case it.Next(): h = append(h, postingsWithIndex{index: idx, p: it}) - } else if it.Err() != nil { + case it.Err() != nil: return nil, it.Err() } } diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index fd16c6ca8..8ba3ea39a 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -123,7 +123,7 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra } } - // There is nothing to do if we did not collect any chunk + // There is nothing to do if we did not collect any chunk. if len(tmpChks) == 0 { return nil } @@ -136,14 +136,15 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra // chunks Meta the first chunk that overlaps with others. 
// Example chunks of a series: 5:(100, 200) 6:(500, 600) 7:(150, 250) 8:(550, 650) // In the example 5 overlaps with 7 and 6 overlaps with 8 so we only want to - // to return chunk Metas for chunk 5 and chunk 6 + // to return chunk Metas for chunk 5 and chunk 6e *chks = append(*chks, tmpChks[0]) - maxTime := tmpChks[0].MaxTime // tracks the maxTime of the previous "to be merged chunk" + maxTime := tmpChks[0].MaxTime // Tracks the maxTime of the previous "to be merged chunk". for _, c := range tmpChks[1:] { - if c.MinTime > maxTime { + switch { + case c.MinTime > maxTime: *chks = append(*chks, c) maxTime = c.MaxTime - } else if c.MaxTime > maxTime { + case c.MaxTime > maxTime: maxTime = c.MaxTime (*chks)[len(*chks)-1].MaxTime = c.MaxTime } diff --git a/tsdb/querier.go b/tsdb/querier.go index f3852d517..abddebb87 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -239,18 +239,20 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings, } for _, m := range ms { - if m.Name == "" && m.Value == "" { // Special-case for AllPostings, used in tests at least. + switch { + case m.Name == "" && m.Value == "": // Special-case for AllPostings, used in tests at least. k, v := index.AllPostingsKey() allPostings, err := ix.Postings(k, v) if err != nil { return nil, err } its = append(its, allPostings) - } else if labelMustBeSet[m.Name] { + case labelMustBeSet[m.Name]: // If this matcher must be non-empty, we can be smarter. matchesEmpty := m.Matches("") isNot := m.Type == labels.MatchNotEqual || m.Type == labels.MatchNotRegexp - if isNot && matchesEmpty { // l!="foo" + switch { + case isNot && matchesEmpty: // l!="foo" // If the label can't be empty and is a Not and the inner matcher // doesn't match empty, then subtract it out at the end. 
inverse, err := m.Inverse() @@ -263,7 +265,7 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings, return nil, err } notIts = append(notIts, it) - } else if isNot && !matchesEmpty { // l!="" + case isNot && !matchesEmpty: // l!="" // If the label can't be empty and is a Not, but the inner matcher can // be empty we need to use inversePostingsForMatcher. inverse, err := m.Inverse() @@ -279,7 +281,7 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings, return index.EmptyPostings(), nil } its = append(its, it) - } else { // l="a" + default: // l="a" // Non-Not matcher, use normal postingsForMatcher. it, err := postingsForMatcher(ix, m) if err != nil { @@ -290,7 +292,7 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings, } its = append(its, it) } - } else { // l="" + default: // l="" // If the matchers for a labelname selects an empty value, it selects all // the series which don't have the label name set too. See: // https://github.com/prometheus/prometheus/issues/3575 and @@ -966,23 +968,24 @@ func (m *mergedStringIter) Next() bool { return false } - if !m.aok { + switch { + case !m.aok: m.cur = m.b.At() m.bok = m.b.Next() m.err = m.b.Err() - } else if !m.bok { + case !m.bok: m.cur = m.a.At() m.aok = m.a.Next() m.err = m.a.Err() - } else if m.b.At() > m.a.At() { + case m.b.At() > m.a.At(): m.cur = m.a.At() m.aok = m.a.Next() m.err = m.a.Err() - } else if m.a.At() > m.b.At() { + case m.a.At() > m.b.At(): m.cur = m.b.At() m.bok = m.b.Next() m.err = m.b.Err() - } else { // Equal. + default: // Equal. 
m.cur = m.b.At() m.aok = m.a.Next() m.err = m.a.Err() diff --git a/tsdb/tombstones/tombstones.go b/tsdb/tombstones/tombstones.go index f7e2a2a1e..a52e1caa9 100644 --- a/tsdb/tombstones/tombstones.go +++ b/tsdb/tombstones/tombstones.go @@ -190,9 +190,10 @@ type Stone struct { func ReadTombstones(dir string) (Reader, int64, error) { b, err := os.ReadFile(filepath.Join(dir, TombstonesFilename)) - if os.IsNotExist(err) { + switch { + case os.IsNotExist(err): return NewMemTombstones(), 0, nil - } else if err != nil { + case err != nil: return nil, 0, err } diff --git a/tsdb/wal.go b/tsdb/wal.go index 93d445a12..0c57865e6 100644 --- a/tsdb/wal.go +++ b/tsdb/wal.go @@ -522,9 +522,10 @@ func (w *SegmentWAL) openSegmentFile(name string) (*os.File, error) { } }() - if n, err := f.Read(metab); err != nil { + switch n, err := f.Read(metab); { + case err != nil: return nil, errors.Wrapf(err, "validate meta %q", f.Name()) - } else if n != 8 { + case n != 8: return nil, errors.Errorf("invalid header size %d in %q", n, f.Name()) } @@ -1063,9 +1064,10 @@ func (r *walReader) entry(cr io.Reader) (WALEntryType, byte, []byte, error) { tr := io.TeeReader(cr, r.crc32) b := make([]byte, 6) - if n, err := tr.Read(b); err != nil { + switch n, err := tr.Read(b); { + case err != nil: return 0, 0, nil, err - } else if n != 6 { + case n != 6: return 0, 0, nil, r.corruptionErr("invalid entry header size %d", n) } @@ -1087,15 +1089,17 @@ func (r *walReader) entry(cr io.Reader) (WALEntryType, byte, []byte, error) { } buf := r.buf[:length] - if n, err := tr.Read(buf); err != nil { + switch n, err := tr.Read(buf); { + case err != nil: return 0, 0, nil, err - } else if n != length { + case n != length: return 0, 0, nil, r.corruptionErr("invalid entry body size %d", n) } - if n, err := cr.Read(b[:4]); err != nil { + switch n, err := cr.Read(b[:4]); { + case err != nil: return 0, 0, nil, err - } else if n != 4 { + case n != 4: return 0, 0, nil, r.corruptionErr("invalid checksum length %d", n) } if exp, 
has := binary.BigEndian.Uint32(b[:4]), r.crc32.Sum32(); has != exp { diff --git a/tsdb/wlog/live_reader.go b/tsdb/wlog/live_reader.go index 29467aef4..0ca69093a 100644 --- a/tsdb/wlog/live_reader.go +++ b/tsdb/wlog/live_reader.go @@ -126,9 +126,10 @@ func (r *LiveReader) Next() bool { // we return EOF and the user can try again later. If we have a full // page, buildRecord is guaranteed to return a record or a non-EOF; it // has checks the records fit in pages. - if ok, err := r.buildRecord(); ok { + switch ok, err := r.buildRecord(); { + case ok: return true - } else if err != nil && err != io.EOF { + case err != nil && err != io.EOF: r.err = err return false } diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go index 72121283d..b0c17dcba 100644 --- a/tsdb/wlog/watcher.go +++ b/tsdb/wlog/watcher.go @@ -405,9 +405,10 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { // Ignore errors reading to end of segment whilst replaying the WAL. if !tail { - if err != nil && errors.Cause(err) != io.EOF { + switch { + case err != nil && errors.Cause(err) != io.EOF: level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "err", err) - } else if reader.Offset() != size { + case reader.Offset() != size: level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size) } return nil @@ -425,9 +426,10 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { // Ignore all errors reading to end of segment whilst replaying the WAL. 
if !tail { - if err != nil && errors.Cause(err) != io.EOF { + switch { + case err != nil && errors.Cause(err) != io.EOF: level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err) - } else if reader.Offset() != size { + case reader.Offset() != size: level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size) } return nil diff --git a/util/treecache/treecache.go b/util/treecache/treecache.go index acdd6f7be..bece9d5c8 100644 --- a/util/treecache/treecache.go +++ b/util/treecache/treecache.go @@ -176,11 +176,11 @@ func (tc *ZookeeperTreeCache) loop(path string) { node = childNode } - err := tc.recursiveNodeUpdate(ev.Path, node) - if err != nil { + switch err := tc.recursiveNodeUpdate(ev.Path, node); { + case err != nil: level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", err) failure() - } else if tc.head.data == nil { + case tc.head.data == nil: level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", "path no longer exists", "path", tc.prefix) failure() } @@ -214,13 +214,14 @@ func (tc *ZookeeperTreeCache) loop(path string) { func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTreeCacheNode) error { data, _, dataWatcher, err := tc.conn.GetW(path) - if errors.Is(err, zk.ErrNoNode) { + switch { + case errors.Is(err, zk.ErrNoNode): tc.recursiveDelete(path, node) if node == tc.head { return fmt.Errorf("path %s does not exist", path) } return nil - } else if err != nil { + case err != nil: return err } @@ -230,10 +231,11 @@ func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTr } children, _, childWatcher, err := tc.conn.ChildrenW(path) - if errors.Is(err, zk.ErrNoNode) { + switch { + case errors.Is(err, zk.ErrNoNode): tc.recursiveDelete(path, node) return nil - } else if err != nil { + 
case err != nil: return err } diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 0624cf2d8..f4d6d27da 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -989,12 +989,14 @@ func (api *API) targets(r *http.Request) apiFuncResult { ScrapeURL: target.URL().String(), GlobalURL: globalURL.String(), LastError: func() string { - if err == nil && lastErrStr == "" { + switch { + case err == nil && lastErrStr == "": return "" - } else if err != nil { + case err != nil: return errors.Wrapf(err, lastErrStr).Error() + default: + return lastErrStr } - return lastErrStr }(), LastScrape: target.LastScrape(), LastScrapeDuration: target.LastScrapeDuration().Seconds(), diff --git a/web/federate_test.go b/web/federate_test.go index 76d4b9cf6..61ef62f46 100644 --- a/web/federate_test.go +++ b/web/federate_test.go @@ -388,13 +388,13 @@ func TestFederationWithNativeHistograms(t *testing.T) { break } require.NoError(t, err) - if et == textparse.EntryHelp { - metricFamilies++ - } if et == textparse.EntryHistogram || et == textparse.EntrySeries { p.Metric(&l) } - if et == textparse.EntryHistogram { + switch et { + case textparse.EntryHelp: + metricFamilies++ + case textparse.EntryHistogram: _, parsedTimestamp, h, fh := p.Histogram() require.Nil(t, h) actVec = append(actVec, promql.Sample{ @@ -402,7 +402,7 @@ func TestFederationWithNativeHistograms(t *testing.T) { H: fh, Metric: l, }) - } else if et == textparse.EntrySeries { + case textparse.EntrySeries: _, parsedTimestamp, f := p.Series() actVec = append(actVec, promql.Sample{ T: *parsedTimestamp, From 1f044154940dfc380e9ce6dd70bae3dc772af14c Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 18 Apr 2023 11:14:54 +0200 Subject: [PATCH 089/632] Update many Go dependencies Ran `make update-go-deps` then hand-edited to remove any downgrades. Then backed out changes to: * Azure: still waiting someone to test this. * Kubernetes: needs update elsewhere to klog. * kube-openapi: it doesn't compile with previous Go version. 
Signed-off-by: Bryan Boreham --- go.mod | 50 +++++++++++++-------------- go.sum | 104 +++++++++++++++++++++++++++++---------------------------- 2 files changed, 78 insertions(+), 76 deletions(-) diff --git a/go.mod b/go.mod index 92cc3931c..7b309b03f 100644 --- a/go.mod +++ b/go.mod @@ -8,46 +8,46 @@ require ( github.com/Azure/go-autorest/autorest/adal v0.9.23 github.com/alecthomas/kingpin/v2 v2.3.2 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 - github.com/aws/aws-sdk-go v1.44.217 + github.com/aws/aws-sdk-go v1.44.245 github.com/cespare/xxhash/v2 v2.2.0 github.com/dennwc/varint v1.0.0 github.com/digitalocean/godo v1.98.0 - github.com/docker/docker v23.0.1+incompatible + github.com/docker/docker v23.0.4+incompatible github.com/edsrzf/mmap-go v1.1.0 github.com/envoyproxy/go-control-plane v0.11.0 - github.com/envoyproxy/protoc-gen-validate v0.9.1 + github.com/envoyproxy/protoc-gen-validate v0.10.1 github.com/fsnotify/fsnotify v1.6.0 github.com/go-kit/log v0.2.1 github.com/go-logfmt/logfmt v0.6.0 - github.com/go-openapi/strfmt v0.21.3 + github.com/go-openapi/strfmt v0.21.7 github.com/go-zookeeper/zk v1.0.3 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10 - github.com/gophercloud/gophercloud v1.2.0 + github.com/google/pprof v0.0.0-20230406165453-00490a63f317 + github.com/gophercloud/gophercloud v1.3.0 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/consul/api v1.20.0 - github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b - github.com/hetznercloud/hcloud-go v1.41.0 - github.com/ionos-cloud/sdk-go/v6 v6.1.5 + github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 + github.com/hetznercloud/hcloud-go v1.42.0 + github.com/ionos-cloud/sdk-go/v6 v6.1.6 github.com/json-iterator/go v1.1.12 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b - github.com/linode/linodego v1.14.1 
+ github.com/linode/linodego v1.16.1 github.com/miekg/dns v1.1.53 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/oklog/run v1.1.0 github.com/oklog/ulid v1.3.1 - github.com/ovh/go-ovh v1.3.0 + github.com/ovh/go-ovh v1.4.1 github.com/pkg/errors v0.9.1 github.com/prometheus/alertmanager v0.25.0 - github.com/prometheus/client_golang v1.14.0 + github.com/prometheus/client_golang v1.15.0 github.com/prometheus/client_model v0.3.0 github.com/prometheus/common v0.42.0 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/exporter-toolkit v0.9.1 - github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 + github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 github.com/stretchr/testify v1.8.2 github.com/vultr/govultr/v2 v2.17.2 @@ -59,18 +59,18 @@ require ( go.opentelemetry.io/otel/sdk v1.14.0 go.opentelemetry.io/otel/trace v1.14.0 go.uber.org/atomic v1.10.0 - go.uber.org/automaxprocs v1.5.1 + go.uber.org/automaxprocs v1.5.2 go.uber.org/goleak v1.2.1 - golang.org/x/net v0.8.0 - golang.org/x/oauth2 v0.6.0 + golang.org/x/net v0.9.0 + golang.org/x/oauth2 v0.7.0 golang.org/x/sync v0.1.0 - golang.org/x/sys v0.6.0 + golang.org/x/sys v0.7.0 golang.org/x/time v0.3.0 - golang.org/x/tools v0.7.0 + golang.org/x/tools v0.8.0 google.golang.org/api v0.114.0 google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 google.golang.org/grpc v1.53.0 - google.golang.org/protobuf v1.29.1 + google.golang.org/protobuf v1.30.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.26.2 @@ -85,7 +85,7 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/rogpeppe/go-internal v1.9.0 // indirect + github.com/rogpeppe/go-internal v1.10.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect ) @@ -166,16 +166,16 @@ 
require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - go.mongodb.org/mongo-driver v1.11.2 // indirect + go.mongodb.org/mongo-driver v1.11.3 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 // indirect go.opentelemetry.io/otel/metric v0.37.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect golang.org/x/crypto v0.7.0 // indirect - golang.org/x/exp v0.0.0-20230307190834-24139beb5833 - golang.org/x/mod v0.9.0 // indirect - golang.org/x/term v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect + golang.org/x/exp v0.0.0-20230321023759-10a507213a29 + golang.org/x/mod v0.10.0 // indirect + golang.org/x/term v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 7ea1be034..c699aa9df 100644 --- a/go.sum +++ b/go.sum @@ -98,8 +98,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.217 h1:FcWC56MRl+k756aH3qeMQTylSdeJ58WN0iFz3fkyRz0= -github.com/aws/aws-sdk-go v1.44.217/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.245 h1:KtY2s4q31/kn33AdV63R5t77mdxsI7rq3YT7Mgo805M= +github.com/aws/aws-sdk-go v1.44.245/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -154,8 +154,8 @@ github.com/digitalocean/godo v1.98.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzK github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v23.0.1+incompatible h1:vjgvJZxprTTE1A37nm+CLNAdwu6xZekyoiVlUZEINcY= -github.com/docker/docker v23.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v23.0.4+incompatible h1:Kd3Bh9V/rO+XpTP/BLqM+gx8z7+Yb0AA2Ibj+nNo4ek= +github.com/docker/docker v23.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -180,8 +180,8 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. 
github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -245,8 +245,9 @@ github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxR github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= -github.com/go-openapi/strfmt v0.21.3 h1:xwhj5X6CjXEZZHMWy1zKJxvW9AfHC9pkyUjLvHtKG7o= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= +github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= +github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod 
h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= @@ -366,8 +367,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10 h1:CqYfpuYIjnlNxM3msdyPRKabhXZWbKjf3Q8BWROFBso= -github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= +github.com/google/pprof v0.0.0-20230406165453-00490a63f317 h1:hFhpt7CTmR3DX+b4R19ydQFtofxT0Sv3QsKNMVQYTMQ= +github.com/google/pprof v0.0.0-20230406165453-00490a63f317/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -380,8 +381,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= -github.com/gophercloud/gophercloud v1.2.0 h1:1oXyj4g54KBg/kFtCdMM6jtxSzeIyg8wv4z1HoGPp1E= -github.com/gophercloud/gophercloud v1.2.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/gophercloud v1.3.0 h1:RUKyCMiZoQR3VlVR5E3K7PK1AC3/qppsWYo6dtBiqs8= +github.com/gophercloud/gophercloud v1.3.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs 
v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -450,13 +451,13 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= -github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b h1:EkuSTU8c/63q4LMayj8ilgg/4I5PXDFVcnqKfs9qcwI= -github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b/go.mod h1:bKUb1ytds5KwUioHdvdq9jmrDqCThv95si0Ub7iNeBg= +github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 h1:I5xhKLePXpXgM6pZ4xZNTiurLLS3sGuZrZFFzAbM67A= +github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197/go.mod h1:2TCrNvonL09r7EiQ6M2rNt+Cmjbn1QbzchFoTWJFpj4= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go v1.41.0 h1:KJGFRRc68QiVu4PrEP5BmCQVveCP2CM26UGQUKGpIUs= -github.com/hetznercloud/hcloud-go v1.41.0/go.mod h1:NaHg47L6C77mngZhwBG652dTAztYrsZ2/iITJKhQkHA= +github.com/hetznercloud/hcloud-go v1.42.0 h1:Es/CDOForQN3nOOP5Vxh1N/YHjpCg386iYEX5zCgi+A= +github.com/hetznercloud/hcloud-go v1.42.0/go.mod h1:YADL8AbmQYH0Eo+1lkuyoc8LutT0UeMvaKP47nNUb+Y= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -464,8 +465,9 @@ github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/ionos-cloud/sdk-go/v6 v6.1.5 h1:BFqThLOgrGJWeo7w6UDyYuNxyi/GqEmNPl7C/YcQ8Fw= -github.com/ionos-cloud/sdk-go/v6 v6.1.5/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/ionos-cloud/sdk-go/v6 v6.1.6 h1:0n4irdqNska+1s3YMCRhrAqKbibEgQ7SwwhAlHzYT5A= +github.com/ionos-cloud/sdk-go/v6 v6.1.6/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -513,8 +515,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.14.1 h1:uGxQyy0BidoEpLGdvfi4cPgEW+0YUFsEGrLEhcTfjNc= -github.com/linode/linodego v1.14.1/go.mod h1:NJlzvlNtdMRRkXb0oN6UWzUkj6t+IBsyveHgZ5Ppjyk= +github.com/linode/linodego v1.16.1 h1:5otq57M4PdHycPERRfSFZ0s1yz1ETVWGjCp3hh7+F9w= +github.com/linode/linodego v1.16.1/go.mod h1:aESRAbpLY9R6IA1WGAWHikRI9DU9Lhesapv1MhKmPHM= 
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -543,6 +545,7 @@ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= @@ -614,8 +617,8 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/ovh/go-ovh v1.3.0 h1:mvZaddk4E4kLcXhzb+cxBsMPYp2pHqiQpWYkInsuZPQ= -github.com/ovh/go-ovh v1.3.0/go.mod h1:AxitLZ5HBRPyUd+Zl60Ajaag+rNTdVXWIkzfrVuTXWA= +github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM= +github.com/ovh/go-ovh v1.4.1/go.mod h1:6bL6pPyUT7tBfI0pqOegJgRjgjuO+mOo+MyXd1EEC0M= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod 
h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= @@ -644,8 +647,8 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM= +github.com/prometheus/client_golang v1.15.0/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -683,16 +686,16 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod 
h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 h1:yFl3jyaSVLNYXlnNYM5z2pagEk1dYQhfr1p20T1NyKY= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 h1:Y7xOFbD+3jaPw+VN7lkakNJ/pa+ZSQVFp1ONtJaBxns= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shoenig/test v0.6.2 h1:tdq+WGnznwE5xcOMXkqqXuudK75RkSGBazBGcP1lX6w= +github.com/shoenig/test v0.6.3 h1:GVXWJFk9PiOjN0KoJ7VrJGH6uLPnqxR7/fe3HUPfE0c= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -766,8 +769,8 @@ go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mI go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.11.2 h1:+1v2rDQUWNcGW7/7E0Jvdz51V38XXxJfhzbV17aNHCw= -go.mongodb.org/mongo-driver 
v1.11.2/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8= +go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y= +go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -802,8 +805,8 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/automaxprocs v1.5.1 h1:e1YG66Lrk73dn4qhg8WFSvhF0JuFQF0ERIp4rpuV8Qk= -go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU= +go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME= +go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= @@ -839,8 +842,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230307190834-24139beb5833 h1:SChBja7BCQewoTAU7IgvucQKMIXrEpFxNMs0spT3/5s= -golang.org/x/exp v0.0.0-20230307190834-24139beb5833/go.mod 
h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -862,8 +865,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -911,8 +914,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0 
h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -920,8 +923,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1003,14 +1006,14 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1021,8 +1024,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod 
h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1082,8 +1085,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= +golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1183,8 +1186,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM= -google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 
+google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1198,7 +1201,6 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= From 74e6668e87adc1a0d0833a40f96f7f064c2012ae Mon Sep 17 00:00:00 2001 From: gotjosh Date: Thu, 20 Apr 2023 09:34:05 +0100 Subject: [PATCH 090/632] update docs Signed-off-by: gotjosh --- docs/querying/api.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index 820414fb1..ef7fa54c6 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -675,9 +675,9 @@ GET /api/v1/rules URL query parameters: - `type=alert|record`: return only the alerting rules (e.g. `type=alert`) or the recording rules (e.g. `type=record`). When the parameter is absent or empty, no filtering is done. -- `rule_name[]=`: only return rules with the given rule name. If the parameter is repeated, rules with any of provided names are returned. If we've filtered out all the rules of a group, the group is not returned. 
When the parameter is absent or empty, no filtering is done. -- `rule_group[]=`: only return rules with the given rule group name. If the parameter is repeated, rules with any of provided rule group names are returned. When the parameter is absent or empty, no filtering is done. -- `file[]=`: only return rules with the given filepath. If the parameter is repeated, rules with any of provided filepaths are returned. When the parameter is absent or empty, no filtering is done. +- `rule_name[]=`: only return rules with the given rule name. If the parameter is repeated, rules with any of the provided names are returned. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done. +- `rule_group[]=`: only return rules with the given rule group name. If the parameter is repeated, rules with any of the provided rule group names are returned. When the parameter is absent or empty, no filtering is done. +- `file[]=`: only return rules with the given filepath. If the parameter is repeated, rules with any of the provided filepaths are returned. When the parameter is absent or empty, no filtering is done. 
```json $ curl http://localhost:9090/api/v1/rules From e78be38cc08231c0a151f4672b3a049f66c83f11 Mon Sep 17 00:00:00 2001 From: gotjosh Date: Thu, 20 Apr 2023 11:20:10 +0100 Subject: [PATCH 091/632] don't show empty groups Signed-off-by: gotjosh --- web/api/v1/api.go | 8 +++++--- web/api/v1/api_test.go | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 9a13e09d9..e700f7120 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1309,7 +1309,7 @@ func (api *API) rules(r *http.Request) apiFuncResult { fSet := queryFormToSet(r.Form["file[]"]) ruleGroups := api.rulesRetriever(r.Context()).RuleGroups() - res := &RuleDiscovery{RuleGroups: make([]*RuleGroup, len(ruleGroups))} + res := &RuleDiscovery{RuleGroups: make([]*RuleGroup, 0, len(ruleGroups))} typ := strings.ToLower(r.URL.Query().Get("type")) if typ != "" && typ != "alert" && typ != "record" { @@ -1319,7 +1319,8 @@ func (api *API) rules(r *http.Request) apiFuncResult { returnAlerts := typ == "" || typ == "alert" returnRecording := typ == "" || typ == "record" - for i, grp := range ruleGroups { + rgs := make([]*RuleGroup, 0, len(ruleGroups)) + for _, grp := range ruleGroups { if len(rgSet) > 0 { if _, ok := rgSet[grp.Name()]; !ok { continue @@ -1400,9 +1401,10 @@ func (api *API) rules(r *http.Request) apiFuncResult { // If the rule group response has no rules, skip it - this means we filtered all the rules of this group. 
if len(apiRuleGroup.Rules) > 0 { - res.RuleGroups[i] = apiRuleGroup + rgs = append(rgs, apiRuleGroup) } } + res.RuleGroups = rgs return apiFuncResult{res, nil, nil, nil} } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index c3e1bf59d..53ea21182 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -2003,7 +2003,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E { endpoint: api.rules, query: url.Values{"rule_group[]": []string{"respond-with-nothing"}}, - response: &RuleDiscovery{RuleGroups: []*RuleGroup{nil}}, + response: &RuleDiscovery{RuleGroups: []*RuleGroup{}}, }, { endpoint: api.rules, From 637235f0a6af483ddf45e16c5ad800fee5bb946b Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Thu, 20 Apr 2023 16:19:50 +0200 Subject: [PATCH 092/632] Revert type casting removal This reverts the removal of type casting due to an error in the dragonfly integration. The change in the type casting introduced by the commit causes a type mismatch, resulting in the following errors: util/runtime/limits_default.go:42:57: cannot use rlimit.Cur (variable of type int64) as type uint64 in argument to limitToString util/runtime/limits_default.go:42:90: cannot use rlimit.Max (variable of type int64) as type uint64 in argument to limitToString Reverting this commit to resolve the type mismatch error and maintain compatibility with the dragonfly integration. 
Signed-off-by: Julien Pivotto --- util/runtime/limits_default.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/util/runtime/limits_default.go b/util/runtime/limits_default.go index 1588a93c7..cd0ce732f 100644 --- a/util/runtime/limits_default.go +++ b/util/runtime/limits_default.go @@ -39,7 +39,9 @@ func getLimits(resource int, unit string) string { if err != nil { panic("syscall.Getrlimit failed: " + err.Error()) } - return fmt.Sprintf("(soft=%s, hard=%s)", limitToString(rlimit.Cur, unit), limitToString(rlimit.Max, unit)) + // rlimit.Cur and rlimit.Max are int64 on some platforms, such as dragonfly. + // We need to cast them explicitly to uint64. + return fmt.Sprintf("(soft=%s, hard=%s)", limitToString(uint64(rlimit.Cur), unit), limitToString(uint64(rlimit.Max), unit)) //nolint:unconvert } // FdLimits returns the soft and hard limits for file descriptors. From 7e9acc2e46452da9b259d2253ba6fc8ae9bd7e95 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Thu, 20 Apr 2023 18:43:51 +0200 Subject: [PATCH 093/632] golangci-lint: remove skip-cache and restore singleCaseSwitch rule Signed-off-by: Matthieu MOREL --- .github/workflows/ci.yml | 1 - .golangci.yml | 3 --- promql/engine.go | 9 +++------ 3 files changed, 3 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 07b1242c2..ee63ee300 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -150,7 +150,6 @@ jobs: uses: golangci/golangci-lint-action@v3.4.0 with: args: --verbose - skip-cache: true version: v1.51.2 fuzzing: uses: ./.github/workflows/fuzzing.yml diff --git a/.golangci.yml b/.golangci.yml index c0c20d425..d1cd86ed5 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -24,9 +24,6 @@ issues: - linters: - gocritic text: "appendAssign" - - linters: - - gocritic - text: "singleCaseSwitch" - path: _test.go linters: - errcheck diff --git a/promql/engine.go b/promql/engine.go index 910601a88..cbeeb82a1 100644 --- a/promql/engine.go 
+++ b/promql/engine.go @@ -757,8 +757,7 @@ func subqueryTimes(path []parser.Node) (time.Duration, time.Duration, *int64) { ts int64 = math.MaxInt64 ) for _, node := range path { - switch n := node.(type) { - case *parser.SubqueryExpr: + if n, ok := node.(*parser.SubqueryExpr); ok { subqOffset += n.OriginalOffset subqRange += n.Range if n.Timestamp != nil { @@ -847,8 +846,7 @@ func (ng *Engine) getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorS func (ng *Engine) getLastSubqueryInterval(path []parser.Node) time.Duration { var interval time.Duration for _, node := range path { - switch n := node.(type) { - case *parser.SubqueryExpr: + if n, ok := node.(*parser.SubqueryExpr); ok { interval = n.Step if n.Step == 0 { interval = time.Duration(ng.noStepSubqueryIntervalFn(durationMilliseconds(n.Range))) * time.Millisecond @@ -914,8 +912,7 @@ func extractGroupsFromPath(p []parser.Node) (bool, []string) { if len(p) == 0 { return false, nil } - switch n := p[len(p)-1].(type) { - case *parser.AggregateExpr: + if n, ok := p[len(p)-1].(*parser.AggregateExpr); ok { return !n.Without, n.Grouping } return false, nil From ecf6bfa619009255d3cb615d649cb66ca4c217fc Mon Sep 17 00:00:00 2001 From: John Losito Date: Fri, 21 Apr 2023 09:26:16 -0400 Subject: [PATCH 094/632] Update configuration.md Signed-off-by: John Losito --- docs/configuration/configuration.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index f27f8256a..c5ecc292c 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -350,6 +350,7 @@ metric_relabel_configs: # This is an experimental feature, this behaviour could # change or be removed in the future. [ body_size_limit: | default = 0 ] + # Per-scrape limit on number of scraped samples that will be accepted. # If more than this number of samples are present after metric relabeling # the entire scrape will be treated as failed. 0 means no limit. 
From e9a1e26ab700c1206c749b0b069fff99c8426aac Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Sat, 22 Apr 2023 02:27:15 +0800 Subject: [PATCH 095/632] Perform integer/float histogram type checking on conversions, and use a consistent method for determining integer vs float histogram Signed-off-by: Jeanette Tan --- prompb/custom.go | 5 +++++ storage/remote/codec.go | 24 +++++++++++++++--------- storage/remote/codec_test.go | 2 +- storage/remote/queue_manager_test.go | 2 +- storage/remote/write_handler.go | 2 +- storage/remote/write_handler_test.go | 2 +- 6 files changed, 24 insertions(+), 13 deletions(-) diff --git a/prompb/custom.go b/prompb/custom.go index 4b07187bd..13d6e0f0c 100644 --- a/prompb/custom.go +++ b/prompb/custom.go @@ -20,6 +20,11 @@ import ( func (m Sample) T() int64 { return m.Timestamp } func (m Sample) V() float64 { return m.Value } +func (h Histogram) IsFloatHistogram() bool { + _, ok := h.GetCount().(*Histogram_CountFloat) + return ok +} + func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) { size := r.Size() data, ok := p.Get().(*[]byte) diff --git a/storage/remote/codec.go b/storage/remote/codec.go index 02c84a3e6..9a683b908 100644 --- a/storage/remote/codec.go +++ b/storage/remote/codec.go @@ -455,10 +455,10 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType { } func getHistogramValType(h *prompb.Histogram) chunkenc.ValueType { - if _, isInt := h.GetCount().(*prompb.Histogram_CountInt); isInt { - return chunkenc.ValHistogram + if h.IsFloatHistogram() { + return chunkenc.ValFloatHistogram } - return chunkenc.ValFloatHistogram + return chunkenc.ValHistogram } // At implements chunkenc.Iterator. @@ -624,9 +624,11 @@ func exemplarProtoToExemplar(ep prompb.Exemplar) exemplar.Exemplar { } // HistogramProtoToHistogram extracts a (normal integer) Histogram from the -// provided proto message. The caller has to make sure that the proto message -// represents an integer histogram and not a float histogram. 
+// provided proto message. func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram { + if hp.IsFloatHistogram() { + panic("don't call HistogramProtoToHistogram on a float histogram") + } return &histogram.Histogram{ CounterResetHint: histogram.CounterResetHint(hp.ResetHint), Schema: hp.Schema, @@ -642,9 +644,11 @@ func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram { } // FloatHistogramProtoToFloatHistogram extracts a float Histogram from the -// provided proto message to a Float Histogram. The caller has to make sure that -// the proto message represents a float histogram and not an integer histogram. +// provided proto message to a Float Histogram. func FloatHistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram { + if !hp.IsFloatHistogram() { + panic("don't call FloatHistogramProtoToFloatHistogram on an integer histogram") + } return &histogram.FloatHistogram{ CounterResetHint: histogram.CounterResetHint(hp.ResetHint), Schema: hp.Schema, @@ -660,9 +664,11 @@ func FloatHistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHi } // HistogramProtoToFloatHistogram extracts and converts a (normal integer) histogram from the provided proto message -// to a float histogram. The caller has to make sure that the proto message represents an integer histogram and not a -// float histogram. +// to a float histogram. 
func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram { + if hp.IsFloatHistogram() { + panic("don't call HistogramProtoToFloatHistogram on a float histogram") + } return &histogram.FloatHistogram{ CounterResetHint: histogram.CounterResetHint(hp.ResetHint), Schema: hp.Schema, diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go index 27e2cc704..dbd5cec21 100644 --- a/storage/remote/codec_test.go +++ b/storage/remote/codec_test.go @@ -528,7 +528,7 @@ func TestNilHistogramProto(*testing.T) { // This function will panic if it impromperly handles nil // values, causing the test to fail. HistogramProtoToHistogram(prompb.Histogram{}) - FloatHistogramProtoToFloatHistogram(prompb.Histogram{}) + HistogramProtoToFloatHistogram(prompb.Histogram{}) } func exampleHistogram() histogram.Histogram { diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index a57c3bf7b..b43258ff0 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -802,7 +802,7 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte) error { for _, histogram := range ts.Histograms { count++ - if histogram.GetCountFloat() > 0 || histogram.GetZeroCountFloat() > 0 { + if histogram.IsFloatHistogram() { c.receivedFloatHistograms[seriesName] = append(c.receivedFloatHistograms[seriesName], histogram) } else { c.receivedHistograms[seriesName] = append(c.receivedHistograms[seriesName], histogram) diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 5aa113088..1f4c43e59 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -125,7 +125,7 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err } for _, hp := range ts.Histograms { - if hp.GetCountFloat() > 0 || hp.GetZeroCountFloat() > 0 { // It is a float histogram. 
+ if hp.IsFloatHistogram() { fhs := FloatHistogramProtoToFloatHistogram(hp) _, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, fhs) } else { diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index e7a88ddc2..3bce5f1d8 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -68,7 +68,7 @@ func TestRemoteWriteHandler(t *testing.T) { } for _, hp := range ts.Histograms { - if hp.GetCountFloat() > 0 || hp.GetZeroCountFloat() > 0 { // It is a float histogram. + if hp.IsFloatHistogram() { fh := FloatHistogramProtoToFloatHistogram(hp) require.Equal(t, mockHistogram{labels, hp.Timestamp, nil, fh}, appendable.histograms[k]) } else { From 1102ffd188b4a9bc16c2c120c3eeb3dc09a92a99 Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Sat, 22 Apr 2023 02:27:15 +0800 Subject: [PATCH 096/632] Fix according to code review Signed-off-by: Jeanette Tan --- storage/remote/codec.go | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/storage/remote/codec.go b/storage/remote/codec.go index 9a683b908..6a58ec4ac 100644 --- a/storage/remote/codec.go +++ b/storage/remote/codec.go @@ -624,10 +624,11 @@ func exemplarProtoToExemplar(ep prompb.Exemplar) exemplar.Exemplar { } // HistogramProtoToHistogram extracts a (normal integer) Histogram from the -// provided proto message. +// provided proto message. The caller has to make sure that the proto message +// represents an integer histogram and not a float histogram, or it panics. 
func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram { if hp.IsFloatHistogram() { - panic("don't call HistogramProtoToHistogram on a float histogram") + panic("HistogramProtoToHistogram called with a float histogram") } return &histogram.Histogram{ CounterResetHint: histogram.CounterResetHint(hp.ResetHint), @@ -644,10 +645,12 @@ func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram { } // FloatHistogramProtoToFloatHistogram extracts a float Histogram from the -// provided proto message to a Float Histogram. +// provided proto message to a Float Histogram. The caller has to make sure that +// the proto message represents a float histogram and not an integer histogram, +// or it panics. func FloatHistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram { if !hp.IsFloatHistogram() { - panic("don't call FloatHistogramProtoToFloatHistogram on an integer histogram") + panic("FloatHistogramProtoToFloatHistogram called with an integer histogram") } return &histogram.FloatHistogram{ CounterResetHint: histogram.CounterResetHint(hp.ResetHint), @@ -664,10 +667,11 @@ func FloatHistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHi } // HistogramProtoToFloatHistogram extracts and converts a (normal integer) histogram from the provided proto message -// to a float histogram. +// to a float histogram. The caller has to make sure that the proto message represents an integer histogram and not a +// float histogram, or it panics. 
func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram { if hp.IsFloatHistogram() { - panic("don't call HistogramProtoToFloatHistogram on a float histogram") + panic("HistogramProtoToFloatHistogram called with a float histogram") } return &histogram.FloatHistogram{ CounterResetHint: histogram.CounterResetHint(hp.ResetHint), From 4d21ac23e642391a5c7f75962cd5c939679f7dd2 Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Sat, 22 Apr 2023 03:14:19 +0800 Subject: [PATCH 097/632] Implement bucket limit for native histograms Signed-off-by: Jeanette Tan --- config/config.go | 3 ++ scrape/scrape.go | 44 +++++++++++++++++++---- scrape/scrape_test.go | 82 +++++++++++++++++++++++++------------------ scrape/target.go | 26 ++++++++++++++ storage/interface.go | 1 + 5 files changed, 116 insertions(+), 40 deletions(-) diff --git a/config/config.go b/config/config.go index 5c51d5a0d..31ebb288e 100644 --- a/config/config.go +++ b/config/config.go @@ -489,6 +489,9 @@ type ScrapeConfig struct { // More than this label value length post metric-relabeling will cause the // scrape to fail. LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"` + // More than this many buckets in a native histogram will cause the scrape to + // fail. + NativeHistogramBucketLimit uint `yaml:"bucket_limit,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. 
diff --git a/scrape/scrape.go b/scrape/scrape.go index 5c649e729..7c24c1070 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -191,6 +191,12 @@ var ( }, []string{"scrape_job"}, ) + targetScrapeNativeHistogramBucketLimit = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrapes_histogram_exceeded_bucket_limit_total", + Help: "Total number of native histograms rejected due to exceeding the bucket limit.", + }, + ) ) func init() { @@ -216,6 +222,7 @@ func init() { targetScrapeExemplarOutOfOrder, targetScrapePoolExceededLabelLimits, targetSyncFailed, + targetScrapeNativeHistogramBucketLimit, ) } @@ -256,6 +263,7 @@ type scrapeLoopOptions struct { target *Target scraper scraper sampleLimit int + bucketLimit int labelLimits *labelLimits honorLabels bool honorTimestamps bool @@ -319,6 +327,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed jitterSeed, opts.honorTimestamps, opts.sampleLimit, + opts.bucketLimit, opts.labelLimits, opts.interval, opts.timeout, @@ -412,6 +421,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { timeout = time.Duration(sp.config.ScrapeTimeout) bodySizeLimit = int64(sp.config.BodySizeLimit) sampleLimit = int(sp.config.SampleLimit) + bucketLimit = int(sp.config.NativeHistogramBucketLimit) labelLimits = &labelLimits{ labelLimit: int(sp.config.LabelLimit), labelNameLengthLimit: int(sp.config.LabelNameLengthLimit), @@ -446,6 +456,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { target: t, scraper: s, sampleLimit: sampleLimit, + bucketLimit: bucketLimit, labelLimits: labelLimits, honorLabels: honorLabels, honorTimestamps: honorTimestamps, @@ -530,6 +541,7 @@ func (sp *scrapePool) sync(targets []*Target) { timeout = time.Duration(sp.config.ScrapeTimeout) bodySizeLimit = int64(sp.config.BodySizeLimit) sampleLimit = int(sp.config.SampleLimit) + bucketLimit = int(sp.config.NativeHistogramBucketLimit) labelLimits = &labelLimits{ labelLimit: 
int(sp.config.LabelLimit), labelNameLengthLimit: int(sp.config.LabelNameLengthLimit), @@ -559,6 +571,7 @@ func (sp *scrapePool) sync(targets []*Target) { target: t, scraper: s, sampleLimit: sampleLimit, + bucketLimit: bucketLimit, labelLimits: labelLimits, honorLabels: honorLabels, honorTimestamps: honorTimestamps, @@ -731,7 +744,7 @@ func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels } // appender returns an appender for ingested samples from the target. -func appender(app storage.Appender, limit int) storage.Appender { +func appender(app storage.Appender, limit, bucketLimit int) storage.Appender { app = &timeLimitAppender{ Appender: app, maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), @@ -744,6 +757,13 @@ func appender(app storage.Appender, limit int) storage.Appender { limit: limit, } } + + if bucketLimit > 0 { + app = &bucketLimitAppender{ + Appender: app, + limit: bucketLimit, + } + } return app } @@ -872,6 +892,7 @@ type scrapeLoop struct { forcedErr error forcedErrMtx sync.Mutex sampleLimit int + bucketLimit int labelLimits *labelLimits interval time.Duration timeout time.Duration @@ -1152,6 +1173,7 @@ func newScrapeLoop(ctx context.Context, jitterSeed uint64, honorTimestamps bool, sampleLimit int, + bucketLimit int, labelLimits *labelLimits, interval time.Duration, timeout time.Duration, @@ -1195,6 +1217,7 @@ func newScrapeLoop(ctx context.Context, appenderCtx: appenderCtx, honorTimestamps: honorTimestamps, sampleLimit: sampleLimit, + bucketLimit: bucketLimit, labelLimits: labelLimits, interval: interval, timeout: timeout, @@ -1462,10 +1485,11 @@ func (sl *scrapeLoop) getCache() *scrapeCache { } type appendErrors struct { - numOutOfOrder int - numDuplicates int - numOutOfBounds int - numExemplarOutOfOrder int + numOutOfOrder int + numDuplicates int + numOutOfBounds int + numExemplarOutOfOrder int + numHistogramBucketLimit int } func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts 
time.Time) (total, added, seriesAdded int, err error) { @@ -1510,7 +1534,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, } // Take an appender with limits. - app = appender(app, sl.sampleLimit) + app = appender(app, sl.sampleLimit, sl.bucketLimit) defer func() { if err != nil { @@ -1693,6 +1717,9 @@ loop: if appErrs.numExemplarOutOfOrder > 0 { level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", appErrs.numExemplarOutOfOrder) } + if appErrs.numHistogramBucketLimit > 0 { + level.Warn(sl.l).Log("msg", "Error on ingesting native histograms that exceeded bucket limit", "num_dropped", appErrs.numHistogramBucketLimit) + } if err == nil { sl.cache.forEachStale(func(lset labels.Labels) bool { // Series no longer exposed, mark it stale. @@ -1735,6 +1762,11 @@ func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err e level.Debug(sl.l).Log("msg", "Out of bounds metric", "series", string(met)) targetScrapeSampleOutOfBounds.Inc() return false, nil + case storage.ErrHistogramBucketLimit: + appErrs.numHistogramBucketLimit++ + level.Debug(sl.l).Log("msg", "Exceeded bucket limit for native histograms", "series", string(met)) + targetScrapeNativeHistogramBucketLimit.Inc() + return false, nil case errSampleLimit: // Keep on parsing output if we hit the limit, so we report the correct // total number of samples scraped. 
diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 2a2ca0948..c815bf5c6 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -487,7 +487,7 @@ func TestScrapePoolAppender(t *testing.T) { appl, ok := loop.(*scrapeLoop) require.True(t, ok, "Expected scrapeLoop but got %T", loop) - wrapped := appender(appl.appender(context.Background()), 0) + wrapped := appender(appl.appender(context.Background()), 0, 0) tl, ok := wrapped.(*timeLimitAppender) require.True(t, ok, "Expected timeLimitAppender but got %T", wrapped) @@ -503,7 +503,7 @@ func TestScrapePoolAppender(t *testing.T) { appl, ok = loop.(*scrapeLoop) require.True(t, ok, "Expected scrapeLoop but got %T", loop) - wrapped = appender(appl.appender(context.Background()), sampleLimit) + wrapped = appender(appl.appender(context.Background()), sampleLimit, 0) sl, ok := wrapped.(*limitAppender) require.True(t, ok, "Expected limitAppender but got %T", wrapped) @@ -513,6 +513,20 @@ func TestScrapePoolAppender(t *testing.T) { _, ok = tl.Appender.(nopAppender) require.True(t, ok, "Expected base appender but got %T", tl.Appender) + + wrapped = appender(appl.appender(context.Background()), sampleLimit, 100) + + bl, ok := wrapped.(*bucketLimitAppender) + require.True(t, ok, "Expected bucketLimitAppender but got %T", wrapped) + + sl, ok = bl.Appender.(*limitAppender) + require.True(t, ok, "Expected limitAppender but got %T", bl) + + tl, ok = sl.Appender.(*timeLimitAppender) + require.True(t, ok, "Expected timeLimitAppender but got %T", sl.Appender) + + _, ok = tl.Appender.(nopAppender) + require.True(t, ok, "Expected base appender but got %T", tl.Appender) } func TestScrapePoolRaces(t *testing.T) { @@ -610,7 +624,7 @@ func TestScrapeLoopStopBeforeRun(t *testing.T) { nopMutator, nil, nil, 0, true, - 0, + 0, 0, nil, 1, 0, @@ -682,7 +696,7 @@ func TestScrapeLoopStop(t *testing.T) { nil, 0, true, - 0, + 0, 0, nil, 10*time.Millisecond, time.Hour, @@ -758,7 +772,7 @@ func TestScrapeLoopRun(t *testing.T) { nil, 
0, true, - 0, + 0, 0, nil, time.Second, time.Hour, @@ -813,7 +827,7 @@ func TestScrapeLoopRun(t *testing.T) { nil, 0, true, - 0, + 0, 0, nil, time.Second, 100*time.Millisecond, @@ -872,7 +886,7 @@ func TestScrapeLoopForcedErr(t *testing.T) { nil, 0, true, - 0, + 0, 0, nil, time.Second, time.Hour, @@ -930,7 +944,7 @@ func TestScrapeLoopMetadata(t *testing.T) { cache, 0, true, - 0, + 0, 0, nil, 0, 0, @@ -987,7 +1001,7 @@ func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) { nil, 0, true, - 0, + 0, 0, nil, 0, 0, @@ -1047,7 +1061,7 @@ func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) { nil, 0, true, - 0, + 0, 0, nil, 0, 0, @@ -1125,7 +1139,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) { nil, 0, true, - 0, + 0, 0, nil, 10*time.Millisecond, time.Hour, @@ -1188,7 +1202,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) { nil, 0, true, - 0, + 0, 0, nil, 10*time.Millisecond, time.Hour, @@ -1254,7 +1268,7 @@ func TestScrapeLoopCache(t *testing.T) { nil, 0, true, - 0, + 0, 0, nil, 10*time.Millisecond, time.Hour, @@ -1337,7 +1351,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) { nil, 0, true, - 0, + 0, 0, nil, 10*time.Millisecond, time.Hour, @@ -1451,7 +1465,7 @@ func TestScrapeLoopAppend(t *testing.T) { nil, 0, true, - 0, + 0, 0, nil, 0, 0, @@ -1546,7 +1560,7 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) { return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil) }, nil, - func(ctx context.Context) storage.Appender { return app }, nil, 0, true, 0, nil, 0, 0, false, false, nil, false, + func(ctx context.Context) storage.Appender { return app }, nil, 0, true, 0, 0, nil, 0, 0, false, false, nil, false, ) slApp := sl.appender(context.Background()) _, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC)) @@ -1577,7 +1591,7 @@ func 
TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) { nil, 0, true, - 0, + 0, 0, nil, 0, 0, @@ -1635,7 +1649,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) { nil, 0, true, - app.limit, + app.limit, 0, nil, 0, 0, @@ -1712,7 +1726,7 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) { nil, 0, true, - 0, + 0, 0, nil, 0, 0, @@ -1760,7 +1774,7 @@ func TestScrapeLoopAppendStaleness(t *testing.T) { nil, 0, true, - 0, + 0, 0, nil, 0, 0, @@ -1811,7 +1825,7 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) { nil, 0, true, - 0, + 0, 0, nil, 0, 0, @@ -1922,7 +1936,7 @@ metric_total{n="2"} 2 # {t="2"} 2.0 20000 nil, 0, true, - 0, + 0, 0, nil, 0, 0, @@ -1987,7 +2001,7 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) { nil, 0, true, - 0, + 0, 0, nil, 0, 0, @@ -2039,7 +2053,7 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) { nil, 0, true, - 0, + 0, 0, nil, 10*time.Millisecond, time.Hour, @@ -2075,7 +2089,7 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) { nil, 0, true, - 0, + 0, 0, nil, 10*time.Millisecond, time.Hour, @@ -2124,7 +2138,7 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T nil, 0, true, - 0, + 0, 0, nil, 0, 0, @@ -2169,7 +2183,7 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) { nil, 0, true, - 0, + 0, 0, nil, 0, 0, @@ -2441,7 +2455,7 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) { func(ctx context.Context) storage.Appender { return capp }, nil, 0, true, - 0, + 0, 0, nil, 0, 0, @@ -2482,7 +2496,7 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) { func(ctx context.Context) storage.Appender { return capp }, nil, 0, false, - 0, + 0, 0, nil, 0, 0, @@ -2522,7 +2536,7 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) { nil, 0, true, - 0, + 0, 0, nil, 0, 0, @@ -2580,7 +2594,7 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) { nil, 0, true, - 0, + 0, 0, nil, 0, 0, @@ -2843,7 +2857,7 @@ func TestScrapeAddFast(t 
*testing.T) { nil, 0, true, - 0, + 0, 0, nil, 0, 0, @@ -2929,7 +2943,7 @@ func TestScrapeReportSingleAppender(t *testing.T) { nil, 0, true, - 0, + 0, 0, nil, 10*time.Millisecond, time.Hour, @@ -3131,7 +3145,7 @@ func TestScrapeLoopLabelLimit(t *testing.T) { nil, 0, true, - 0, + 0, 0, &test.labelLimits, 0, 0, diff --git a/scrape/target.go b/scrape/target.go index 6c4703118..f916e4549 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -27,6 +27,7 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/model/textparse" @@ -355,6 +356,31 @@ func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels, return ref, nil } +// bucketLimitAppender limits the number of total appended samples in a batch. +type bucketLimitAppender struct { + storage.Appender + + limit int +} + +func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if h != nil { + if len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit { + return 0, storage.ErrHistogramBucketLimit + } + } + if fh != nil { + if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit { + return 0, storage.ErrHistogramBucketLimit + } + } + ref, err := app.Appender.AppendHistogram(ref, lset, t, h, fh) + if err != nil { + return 0, err + } + return ref, nil +} + // PopulateLabels builds a label set from the given label set and scrape configuration. // It returns a label set before relabeling was applied as the second return value. // Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling. 
diff --git a/storage/interface.go b/storage/interface.go index b282f1fc6..18119f254 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -46,6 +46,7 @@ var ( ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative") ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative") ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided") + ErrHistogramBucketLimit = errors.New("histogram bucket limit exceeded") ) // SeriesRef is a generic series reference. In prometheus it is either a From d3ad158a660de06201d28a384cc2175e0488bab2 Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Sat, 22 Apr 2023 03:14:19 +0800 Subject: [PATCH 098/632] Update docs and comments Signed-off-by: Jeanette Tan --- config/config.go | 4 ++-- docs/configuration/configuration.md | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/config/config.go b/config/config.go index 31ebb288e..956e1c6c6 100644 --- a/config/config.go +++ b/config/config.go @@ -489,8 +489,8 @@ type ScrapeConfig struct { // More than this label value length post metric-relabeling will cause the // scrape to fail. LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"` - // More than this many buckets in a native histogram will cause the scrape to - // fail. + // More than this many buckets in a native histogram will cause the histogram + // to be ignored, but it will not make the whole scrape fail. NativeHistogramBucketLimit uint `yaml:"bucket_limit,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index f27f8256a..f51227320 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -376,6 +376,11 @@ metric_relabel_configs: # 0 means no limit. 
This is an experimental feature, this behaviour could # change in the future. [ target_limit: | default = 0 ] + +# Limit on total number of positive and negative buckets allowed in a native +# histogram. If this is exceeded, the histogram will be ignored, but this will +# not make the scrape fail. 0 means no limit. +[ bucket_limit: | default = 0 ] ``` Where `` must be unique across all scrape configurations. From 071426f72f3546149b8d6d6ae3c77bf044e8fc93 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Sat, 22 Apr 2023 03:14:19 +0800 Subject: [PATCH 099/632] Add unit test for bucket limit appender MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Refactors textparser test to use a common test utility to create protobuf representation from MetricFamily Signed-off-by: György Krajcsovits --- model/textparse/protobufparse_test.go | 12 +--- scrape/scrape_test.go | 90 +++++++++++++++++++++++++++ util/testutil/protobuf.go | 50 +++++++++++++++ 3 files changed, 143 insertions(+), 9 deletions(-) create mode 100644 util/testutil/protobuf.go diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go index 90c6a90f3..fb8669197 100644 --- a/model/textparse/protobufparse_test.go +++ b/model/textparse/protobufparse_test.go @@ -15,7 +15,6 @@ package textparse import ( "bytes" - "encoding/binary" "errors" "io" "testing" @@ -26,8 +25,9 @@ import ( "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/util/testutil" - dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" + dto "github.com/prometheus/client_model/go" ) func TestProtobufParse(t *testing.T) { @@ -449,7 +449,6 @@ metric: < `, } - varintBuf := make([]byte, binary.MaxVarintLen32) inputBuf := &bytes.Buffer{} for _, tmf := range textMetricFamilies { @@ -457,13 +456,8 @@ metric: < // From 
text to proto message. require.NoError(t, proto.UnmarshalText(tmf, pb)) // From proto message to binary protobuf. - protoBuf, err := proto.Marshal(pb) + err := testutil.AddMetricFamilyToProtobuf(inputBuf, pb) require.NoError(t, err) - - // Write first length, then binary protobuf. - varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf))) - inputBuf.Write(varintBuf[:varintLength]) - inputBuf.Write(protoBuf) } exp := []struct { diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index c815bf5c6..b5cabea52 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -30,6 +30,7 @@ import ( "github.com/go-kit/log" "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -1709,6 +1710,95 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) { require.Equal(t, 0, seriesAdded) } +func TestScrapeLoop_HistogramBucketLimit(t *testing.T) { + resApp := &collectResultAppender{} + app := &bucketLimitAppender{Appender: resApp, limit: 2} + + sl := newScrapeLoop(context.Background(), + nil, nil, nil, + func(l labels.Labels) labels.Labels { + if l.Has("deleteme") { + return labels.EmptyLabels() + } + return l + }, + nopMutator, + func(ctx context.Context) storage.Appender { return app }, + nil, + 0, + true, + app.limit, 0, + nil, + 0, + 0, + false, + false, + nil, + false, + ) + + metric := dto.Metric{} + err := targetScrapeNativeHistogramBucketLimit.Write(&metric) + require.NoError(t, err) + beforeMetricValue := metric.GetCounter().GetValue() + + nativeHistogram := prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "testing", + Name: "example_native_histogram", + Help: "This is used for testing", + ConstLabels: map[string]string{"some": "value"}, + NativeHistogramBucketFactor: 1.1, // 10% increase from bucket to bucket + NativeHistogramMaxBucketNumber: 100, // intentionally higher than the limit 
we'll use in the scraper + }) + registry := prometheus.NewRegistry() + registry.Register(nativeHistogram) + nativeHistogram.Observe(1.0) + nativeHistogram.Observe(10.0) // in different bucket since > 1*1.1 + + gathered, err := registry.Gather() + require.NoError(t, err) + require.NotEmpty(t, gathered) + + histogramMetricFamily := gathered[0] + msg, err := testutil.MetricFamilyToProtobuf(histogramMetricFamily) + require.NoError(t, err) + + now := time.Now() + total, added, seriesAdded, err := sl.append(app, msg, "application/vnd.google.protobuf", now) + require.NoError(t, err) + require.Equal(t, 1, total) + require.Equal(t, 1, added) + require.Equal(t, 1, seriesAdded) + + err = targetScrapeNativeHistogramBucketLimit.Write(&metric) + require.NoError(t, err) + metricValue := metric.GetCounter().GetValue() + require.Equal(t, beforeMetricValue, metricValue) + beforeMetricValue = metricValue + + nativeHistogram.Observe(100.0) // in different bucket since > 10*1.1 + + gathered, err = registry.Gather() + require.NoError(t, err) + require.NotEmpty(t, gathered) + + histogramMetricFamily = gathered[0] + msg, err = testutil.MetricFamilyToProtobuf(histogramMetricFamily) + require.NoError(t, err) + + now = time.Now() + total, added, seriesAdded, err = sl.append(app, msg, "application/vnd.google.protobuf", now) + require.NoError(t, err) + require.Equal(t, 1, total) + require.Equal(t, 1, added) + require.Equal(t, 0, seriesAdded) + + err = targetScrapeNativeHistogramBucketLimit.Write(&metric) + require.NoError(t, err) + metricValue = metric.GetCounter().GetValue() + require.Equal(t, beforeMetricValue+1, metricValue) +} + func TestScrapeLoop_ChangingMetricString(t *testing.T) { // This is a regression test for the scrape loop cache not properly maintaining // IDs when the string representation of a metric changes across a scrape. 
Thus diff --git a/util/testutil/protobuf.go b/util/testutil/protobuf.go new file mode 100644 index 000000000..8650f1373 --- /dev/null +++ b/util/testutil/protobuf.go @@ -0,0 +1,50 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "bytes" + "encoding/binary" + + "github.com/gogo/protobuf/proto" + dto "github.com/prometheus/client_model/go" +) + +// Write a MetricFamily into a protobuf +func MetricFamilyToProtobuf(metricFamily *dto.MetricFamily) ([]byte, error) { + buffer := &bytes.Buffer{} + err := AddMetricFamilyToProtobuf(buffer, metricFamily) + if err != nil { + return nil, err + } + return buffer.Bytes(), nil +} + +// Append a MetricFamily protobuf representation to a buffer +func AddMetricFamilyToProtobuf(buffer *bytes.Buffer, metricFamily *dto.MetricFamily) error { + protoBuf, err := proto.Marshal(metricFamily) + if err != nil { + return err + } + + varintBuf := make([]byte, binary.MaxVarintLen32) + varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf))) + + _, err = buffer.Write(varintBuf[:varintLength]) + if err != nil { + return err + } + _, err = buffer.Write(protoBuf) + return err +} From 2ad39baa726958d9688ae2094bcedf7002c46c56 Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Sat, 22 Apr 2023 03:14:19 +0800 Subject: [PATCH 100/632] Treat bucket limit like sample limit and make it fail the whole scrape and return an error Signed-off-by: Jeanette Tan --- 
config/config.go | 4 +-- docs/configuration/configuration.md | 4 +-- scrape/scrape.go | 40 ++++++++++++++++------------- scrape/scrape_test.go | 7 +++-- scrape/target.go | 9 ++++--- storage/interface.go | 1 - 6 files changed, 37 insertions(+), 28 deletions(-) diff --git a/config/config.go b/config/config.go index 956e1c6c6..31ebb288e 100644 --- a/config/config.go +++ b/config/config.go @@ -489,8 +489,8 @@ type ScrapeConfig struct { // More than this label value length post metric-relabeling will cause the // scrape to fail. LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"` - // More than this many buckets in a native histogram will cause the histogram - // to be ignored, but it will not make the whole scrape fail. + // More than this many buckets in a native histogram will cause the scrape to + // fail. NativeHistogramBucketLimit uint `yaml:"bucket_limit,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index f51227320..cfb11e21e 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -378,8 +378,8 @@ metric_relabel_configs: [ target_limit: | default = 0 ] # Limit on total number of positive and negative buckets allowed in a native -# histogram. If this is exceeded, the histogram will be ignored, but this will -# not make the scrape fail. 0 means no limit. +# histogram. If this is exceeded, the entire scrape will be treated as failed. +# 0 means no limit. 
[ bucket_limit: | default = 0 ] ``` diff --git a/scrape/scrape.go b/scrape/scrape.go index 7c24c1070..179adbb2a 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -194,7 +194,7 @@ var ( targetScrapeNativeHistogramBucketLimit = prometheus.NewCounter( prometheus.CounterOpts{ Name: "prometheus_target_scrapes_histogram_exceeded_bucket_limit_total", - Help: "Total number of native histograms rejected due to exceeding the bucket limit.", + Help: "Total number of scrapes that hit the native histograms bucket limit and were rejected.", }, ) ) @@ -1485,11 +1485,10 @@ func (sl *scrapeLoop) getCache() *scrapeCache { } type appendErrors struct { - numOutOfOrder int - numDuplicates int - numOutOfBounds int - numExemplarOutOfOrder int - numHistogramBucketLimit int + numOutOfOrder int + numDuplicates int + numOutOfBounds int + numExemplarOutOfOrder int } func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) { @@ -1506,6 +1505,7 @@ defTime = timestamp.FromTime(ts) appErrs = appendErrors{} sampleLimitErr error + bucketLimitErr error e exemplar.Exemplar // escapes to heap so hoisted out of loop meta metadata.Metadata metadataChanged bool @@ -1655,7 +1655,7 @@ } else { ref, err = app.Append(ref, lset, t, val) } - sampleAdded, err = sl.checkAddError(ce, met, parsedTimestamp, err, &sampleLimitErr, &appErrs) + sampleAdded, err = sl.checkAddError(ce, met, parsedTimestamp, err, &sampleLimitErr, &bucketLimitErr, &appErrs) if err != nil { if err != storage.ErrNotFound { level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err) @@ -1669,7 +1669,7 @@ sl.cache.trackStaleness(hash, lset) } sl.cache.addRef(met, ref, lset, hash) - if sampleAdded && sampleLimitErr == nil { + if sampleAdded && sampleLimitErr == nil && bucketLimitErr == nil { seriesAdded++ } } @@ -1705,6 +1705,13 @@ // We 
only want to increment this once per scrape, so this is Inc'd outside the loop. targetScrapeSampleLimit.Inc() } + if bucketLimitErr != nil { + if err == nil { + err = bucketLimitErr // if sample limit is hit, that error takes precedence + } + // We only want to increment this once per scrape, so this is Inc'd outside the loop. + targetScrapeNativeHistogramBucketLimit.Inc() + } if appErrs.numOutOfOrder > 0 { level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order samples", "num_dropped", appErrs.numOutOfOrder) } @@ -1717,9 +1724,6 @@ loop: if appErrs.numExemplarOutOfOrder > 0 { level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", appErrs.numExemplarOutOfOrder) } - if appErrs.numHistogramBucketLimit > 0 { - level.Warn(sl.l).Log("msg", "Error on ingesting native histograms that exceeded bucket limit", "num_dropped", appErrs.numHistogramBucketLimit) - } if err == nil { sl.cache.forEachStale(func(lset labels.Labels) bool { // Series no longer exposed, mark it stale. @@ -1737,8 +1741,8 @@ loop: } // Adds samples to the appender, checking the error, and then returns the # of samples added, -// whether the caller should continue to process more samples, and any sample limit errors. -func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr *error, appErrs *appendErrors) (bool, error) { +// whether the caller should continue to process more samples, and any sample or bucket limit errors. 
+func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) { switch errors.Cause(err) { case nil: if tp == nil && ce != nil { @@ -1762,16 +1766,16 @@ func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err e level.Debug(sl.l).Log("msg", "Out of bounds metric", "series", string(met)) targetScrapeSampleOutOfBounds.Inc() return false, nil - case storage.ErrHistogramBucketLimit: - appErrs.numHistogramBucketLimit++ - level.Debug(sl.l).Log("msg", "Exceeded bucket limit for native histograms", "series", string(met)) - targetScrapeNativeHistogramBucketLimit.Inc() - return false, nil case errSampleLimit: // Keep on parsing output if we hit the limit, so we report the correct // total number of samples scraped. *sampleLimitErr = err return false, nil + case errBucketLimit: + // Keep on parsing output if we hit the limit, so we report the correct + // total number of samples scraped. 
+ *bucketLimitErr = err + return false, nil default: return false, err } diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index b5cabea52..8822ed96d 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -1788,7 +1788,10 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) { now = time.Now() total, added, seriesAdded, err = sl.append(app, msg, "application/vnd.google.protobuf", now) - require.NoError(t, err) + if err != errBucketLimit { + t.Fatalf("Did not see expected histogram bucket limit error: %s", err) + } + require.NoError(t, app.Rollback()) require.Equal(t, 1, total) require.Equal(t, 1, added) require.Equal(t, 0, seriesAdded) @@ -3010,7 +3013,7 @@ func TestReuseCacheRace(*testing.T) { func TestCheckAddError(t *testing.T) { var appErrs appendErrors sl := scrapeLoop{l: log.NewNopLogger()} - sl.checkAddError(nil, nil, nil, storage.ErrOutOfOrderSample, nil, &appErrs) + sl.checkAddError(nil, nil, nil, storage.ErrOutOfOrderSample, nil, nil, &appErrs) require.Equal(t, 1, appErrs.numOutOfOrder) } diff --git a/scrape/target.go b/scrape/target.go index f916e4549..a655e8541 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -314,7 +314,10 @@ func (ts Targets) Len() int { return len(ts) } func (ts Targets) Less(i, j int) bool { return ts[i].URL().String() < ts[j].URL().String() } func (ts Targets) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] } -var errSampleLimit = errors.New("sample limit exceeded") +var ( + errSampleLimit = errors.New("sample limit exceeded") + errBucketLimit = errors.New("histogram bucket limit exceeded") +) // limitAppender limits the number of total appended samples in a batch. 
type limitAppender struct { @@ -366,12 +369,12 @@ type bucketLimitAppender struct { func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { if h != nil { if len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit { - return 0, storage.ErrHistogramBucketLimit + return 0, errBucketLimit } } if fh != nil { if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit { - return 0, storage.ErrHistogramBucketLimit + return 0, errBucketLimit } } ref, err := app.Appender.AppendHistogram(ref, lset, t, h, fh) diff --git a/storage/interface.go b/storage/interface.go index 18119f254..b282f1fc6 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -46,7 +46,6 @@ var ( ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative") ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative") ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided") - ErrHistogramBucketLimit = errors.New("histogram bucket limit exceeded") ) // SeriesRef is a generic series reference. In prometheus it is either a From 13938d0ccc74b6e094cdf9cd88b921285e670081 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sat, 22 Apr 2023 10:16:49 +0200 Subject: [PATCH 101/632] Update test_golang_oldest to 1.19 Since test_go is on 1.20, and it should use the previous version. Signed-off-by: Bryan Boreham --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ee63ee300..bfa1ac00d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -66,7 +66,7 @@ jobs: runs-on: ubuntu-latest # The go verson in this image should be N-1 wrt test_go. 
container: - image: quay.io/prometheus/golang-builder:1.18-base + image: quay.io/prometheus/golang-builder:1.19-base steps: - uses: actions/checkout@v3 - run: make build From d281ebb178a9c1b51f74cd61afbdf16c1ca5f1f8 Mon Sep 17 00:00:00 2001 From: Vladimir Varankin Date: Tue, 28 Mar 2023 20:47:18 +0200 Subject: [PATCH 102/632] web: display GOMEMLIMIT in runtime info Signed-off-by: Vladimir Varankin --- web/api/v1/api.go | 1 + web/web.go | 2 ++ 2 files changed, 3 insertions(+) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index c43f4573a..e1168e1a6 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -143,6 +143,7 @@ type RuntimeInfo struct { CorruptionCount int64 `json:"corruptionCount"` GoroutineCount int `json:"goroutineCount"` GOMAXPROCS int `json:"GOMAXPROCS"` + GOMEMLIMIT int64 `json:"GOMEMLIMIT"` GOGC string `json:"GOGC"` GODEBUG string `json:"GODEBUG"` StorageRetention string `json:"storageRetention"` diff --git a/web/web.go b/web/web.go index c8a32e0a6..27378b3b8 100644 --- a/web/web.go +++ b/web/web.go @@ -29,6 +29,7 @@ import ( "path" "path/filepath" "runtime" + "runtime/debug" "strconv" "strings" "sync" @@ -710,6 +711,7 @@ func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) { CWD: h.cwd, GoroutineCount: runtime.NumGoroutine(), GOMAXPROCS: runtime.GOMAXPROCS(0), + GOMEMLIMIT: debug.SetMemoryLimit(-1), GOGC: os.Getenv("GOGC"), GODEBUG: os.Getenv("GODEBUG"), } From aeccf9e7700a21268622a5bfb3b8e29acde95360 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 21 Apr 2023 18:12:57 +0200 Subject: [PATCH 103/632] Bump version to 2.44.0-rc0 Including CHANGELOG. 
Signed-off-by: Bryan Boreham --- CHANGELOG.md | 15 +++++++++++++++ VERSION | 2 +- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/react-app/package.json | 4 ++-- 6 files changed, 28 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 13bdde031..526ec89c6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,20 @@ # Changelog +## 2.44.0-rc.0 / 2023-04-22 + +* [CHANGE] remote-write: raise default samples per send to 2,000. #12203 +* [FEATURE] remote-read: handle native histograms. #12085, #12192 +* [FEATURE] promtool: health and readiness check of prometheus server in CLI. #12096 +* [FEATURE] promql engine: add query_samples_total metric, the total number of samples loaded by all queries. #12251 +* [ENHANCEMENT] scraping: reduce memory allocations on Target labels. #12084 +* [ENHANCEMENT] promql: use faster heap method for topk/bottomk. #12190 +* [ENHANCEMENT] rules API: Allow filtering by rule name. #12270 +* [ENHANCEMENT] native histograms: various fixes and improvements. #11687, #12264, #12272 +* [ENHANCEMENT] ui: search of "Scrape Pools" is now case-insensitive. #12207 +* [ENHANCEMENT] tsdb: add an affirmative log message for successful WAL repair. #12135 +* [BUGFIX] tsdb: block compaction failed when shutting down. #12179 +* [BUGFIX] tsdb: out-of-order chunks could be ignored if the write-behind log was deleted. 
#12127 + ## 2.43.0 / 2023-03-21 We are working on some performance improvements in Prometheus, which are only diff --git a/VERSION b/VERSION index 5b9cd9afd..a4494d974 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.43.0 +2.44.0-rc.0 diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index f20a63468..6521c8729 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.43.0", + "version": "0.44.0-rc.0", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.43.0", + "@prometheus-io/lezer-promql": "0.44.0-rc.0", "lru-cache": "^6.0.0" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 411251e56..be92ac46f 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.43.0", + "version": "0.44.0-rc.0", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 2e334cf9e..09a4fd3fe 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -28,10 +28,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.43.0", + "version": "0.44.0-rc.0", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.43.0", + "@prometheus-io/lezer-promql": "0.44.0-rc.0", "lru-cache": "^6.0.0" }, "devDependencies": { @@ -61,7 +61,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - 
"version": "0.43.0", + "version": "0.44.0-rc.0", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.2.2", @@ -20763,7 +20763,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.43.0", + "version": "0.44.0-rc.0", "dependencies": { "@codemirror/autocomplete": "^6.4.0", "@codemirror/commands": "^6.2.0", @@ -20781,7 +20781,7 @@ "@lezer/lr": "^1.3.1", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.43.0", + "@prometheus-io/codemirror-promql": "0.44.0-rc.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.2.0", @@ -23417,7 +23417,7 @@ "@lezer/lr": "^1.3.1", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.43.0", + "@prometheus-io/codemirror-promql": "0.44.0-rc.0", "@testing-library/react-hooks": "^7.0.2", "@types/enzyme": "^3.10.12", "@types/flot": "0.0.32", @@ -23468,7 +23468,7 @@ "@lezer/common": "^1.0.2", "@lezer/highlight": "^1.1.3", "@lezer/lr": "^1.3.1", - "@prometheus-io/lezer-promql": "0.43.0", + "@prometheus-io/lezer-promql": "0.44.0-rc.0", "@types/lru-cache": "^5.1.1", "isomorphic-fetch": "^3.0.0", "lru-cache": "^6.0.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index c8b115582..b4929d664 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.43.0", + "version": "0.44.0-rc.0", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.4.0", @@ -19,7 +19,7 @@ "@lezer/common": "^1.0.2", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.43.0", + "@prometheus-io/codemirror-promql": "0.44.0-rc.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.2.0", From dfabc69303a92804883f597570569b0e95fd9836 Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Tue, 25 Apr 2023 01:41:04 +0800 Subject: [PATCH 104/632] Add tests according to 
code review Signed-off-by: Jeanette Tan --- scrape/scrape_test.go | 38 +++++++++++++++------------ scrape/target_test.go | 61 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+), 16 deletions(-) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 8822ed96d..745765ee8 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -1742,18 +1742,24 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) { require.NoError(t, err) beforeMetricValue := metric.GetCounter().GetValue() - nativeHistogram := prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "testing", - Name: "example_native_histogram", - Help: "This is used for testing", - ConstLabels: map[string]string{"some": "value"}, - NativeHistogramBucketFactor: 1.1, // 10% increase from bucket to bucket - NativeHistogramMaxBucketNumber: 100, // intentionally higher than the limit we'll use in the scraper - }) + nativeHistogram := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "testing", + Name: "example_native_histogram", + Help: "This is used for testing", + ConstLabels: map[string]string{"some": "value"}, + NativeHistogramBucketFactor: 1.1, // 10% increase from bucket to bucket + NativeHistogramMaxBucketNumber: 100, // intentionally higher than the limit we'll use in the scraper + }, + []string{"size"}, + ) registry := prometheus.NewRegistry() registry.Register(nativeHistogram) - nativeHistogram.Observe(1.0) - nativeHistogram.Observe(10.0) // in different bucket since > 1*1.1 + nativeHistogram.WithLabelValues("S").Observe(1.0) + nativeHistogram.WithLabelValues("M").Observe(1.0) + nativeHistogram.WithLabelValues("L").Observe(1.0) + nativeHistogram.WithLabelValues("M").Observe(10.0) + nativeHistogram.WithLabelValues("L").Observe(10.0) // in different bucket since > 1*1.1 gathered, err := registry.Gather() require.NoError(t, err) @@ -1766,9 +1772,9 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) { now := time.Now() total, added, 
seriesAdded, err := sl.append(app, msg, "application/vnd.google.protobuf", now) require.NoError(t, err) - require.Equal(t, 1, total) - require.Equal(t, 1, added) - require.Equal(t, 1, seriesAdded) + require.Equal(t, 3, total) + require.Equal(t, 3, added) + require.Equal(t, 3, seriesAdded) err = targetScrapeNativeHistogramBucketLimit.Write(&metric) require.NoError(t, err) @@ -1776,7 +1782,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) { require.Equal(t, beforeMetricValue, metricValue) beforeMetricValue = metricValue - nativeHistogram.Observe(100.0) // in different bucket since > 10*1.1 + nativeHistogram.WithLabelValues("L").Observe(100.0) // in different bucket since > 10*1.1 gathered, err = registry.Gather() require.NoError(t, err) @@ -1792,8 +1798,8 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) { t.Fatalf("Did not see expected histogram bucket limit error: %s", err) } require.NoError(t, app.Rollback()) - require.Equal(t, 1, total) - require.Equal(t, 1, added) + require.Equal(t, 3, total) + require.Equal(t, 3, added) require.Equal(t, 0, seriesAdded) err = targetScrapeNativeHistogramBucketLimit.Write(&metric) diff --git a/scrape/target_test.go b/scrape/target_test.go index 9d25df414..12d3b5a4d 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -31,6 +31,7 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" ) @@ -488,3 +489,63 @@ scrape_configs: }) } } + +func TestBucketLimitAppender(t *testing.T) { + example := histogram.Histogram{ + Schema: 0, + Count: 21, + Sum: 33, + ZeroThreshold: 0.001, + ZeroCount: 3, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{3, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + NegativeBuckets: []int64{3, 0, 0}, + } + + cases := []struct { + h histogram.Histogram + limit 
int + expectError bool + }{ + { + h: example, + limit: 3, + expectError: true, + }, + { + h: example, + limit: 10, + expectError: false, + }, + } + + resApp := &collectResultAppender{} + + for _, c := range cases { + for _, floatHisto := range []bool{true, false} { + t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) { + app := &bucketLimitAppender{Appender: resApp, limit: c.limit} + ts := int64(10 * time.Minute / time.Millisecond) + h := c.h + lbls := labels.FromStrings("__name__", "sparse_histogram_series") + var err error + if floatHisto { + _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat()) + } else { + _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) + } + if c.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + require.NoError(t, app.Commit()) + }) + } + } +} From 276ca6a883bed19c2c78c356f93e0b939841f584 Mon Sep 17 00:00:00 2001 From: cui fliter Date: Tue, 25 Apr 2023 14:19:16 +0800 Subject: [PATCH 105/632] fix some comments Signed-off-by: cui fliter --- CONTRIBUTING.md | 2 +- discovery/kubernetes/node.go | 2 +- model/labels/labels.go | 2 +- model/labels/labels_string.go | 2 +- tsdb/db.go | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2fbed3880..57055ef38 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -85,7 +85,7 @@ The PromQL parser grammar is located in `promql/parser/generated_parser.y` and i The parser is built using [goyacc](https://pkg.go.dev/golang.org/x/tools/cmd/goyacc) If doing some sort of debugging, then it is possible to add some verbose output. After generating the parser, then you -can modify the the `./promql/parser/generated_parser.y.go` manually. +can modify the `./promql/parser/generated_parser.y.go` manually. ```golang // As of writing this was somewhere around line 600. 
diff --git a/discovery/kubernetes/node.go b/discovery/kubernetes/node.go index 16a06e7a0..d0a6d2780 100644 --- a/discovery/kubernetes/node.go +++ b/discovery/kubernetes/node.go @@ -209,7 +209,7 @@ func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group { return tg } -// nodeAddresses returns the provided node's address, based on the priority: +// nodeAddress returns the provided node's address, based on the priority: // 1. NodeInternalIP // 2. NodeInternalDNS // 3. NodeExternalIP diff --git a/model/labels/labels.go b/model/labels/labels.go index 93524ddcf..4f8aedd49 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -622,7 +622,7 @@ func (b *ScratchBuilder) Sort() { slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name }) } -// Asssign is for when you already have a Labels which you want this ScratchBuilder to return. +// Assign is for when you already have a Labels which you want this ScratchBuilder to return. func (b *ScratchBuilder) Assign(ls Labels) { b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice. } diff --git a/model/labels/labels_string.go b/model/labels/labels_string.go index ff46103eb..da8229796 100644 --- a/model/labels/labels_string.go +++ b/model/labels/labels_string.go @@ -799,7 +799,7 @@ func (b *ScratchBuilder) Sort() { slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name }) } -// Asssign is for when you already have a Labels which you want this ScratchBuilder to return. +// Assign is for when you already have a Labels which you want this ScratchBuilder to return. 
func (b *ScratchBuilder) Assign(l Labels) { b.output = l } diff --git a/tsdb/db.go b/tsdb/db.go index de37bdb4f..61333c972 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -1841,7 +1841,7 @@ func (db *DB) Querier(_ context.Context, mint, maxt int64) (storage.Querier, err return storage.NewMergeQuerier(blockQueriers, nil, storage.ChainedSeriesMerge), nil } -// blockQueriersForRange returns individual block chunk queriers from the persistent blocks, in-order head block, and the +// blockChunkQuerierForRange returns individual block chunk queriers from the persistent blocks, in-order head block, and the // out-of-order head block, overlapping with the given time range. func (db *DB) blockChunkQuerierForRange(mint, maxt int64) ([]storage.ChunkQuerier, error) { var blocks []BlockReader From 8bfd1621989a0cfb4c18743588e1944fd99fc363 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 25 Apr 2023 10:03:41 +0000 Subject: [PATCH 106/632] Review feedback - mostly capitalization Signed-off-by: Bryan Boreham --- CHANGELOG.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 526ec89c6..9daeaae04 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,18 +2,18 @@ ## 2.44.0-rc.0 / 2023-04-22 -* [CHANGE] remote-write: raise default samples per send to 2,000. #12203 -* [FEATURE] remote-read: handle native histograms. #12085, #12192 -* [FEATURE] promtool: health and readiness check of prometheus server in CLI. #12096 -* [FEATURE] promql engine: add query_samples_total metric, the total number of samples loaded by all queries. #12251 -* [ENHANCEMENT] scraping: reduce memory allocations on Target labels. #12084 -* [ENHANCEMENT] promql: use faster heap method for topk/bottomk. #12190 -* [ENHANCEMENT] rules API: Allow filtering by rule name. #12270 -* [ENHANCEMENT] native histograms: various fixes and improvements. #11687, #12264, #12272 -* [ENHANCEMENT] ui: search of "Scrape Pools" is now case-insensitive. 
#12207 -* [ENHANCEMENT] tsdb: add an affirmative log message for successful WAL repair. #12135 -* [BUGFIX] tsdb: block compaction failed when shutting down. #12179 -* [BUGFIX] tsdb: out-of-order chunks could be ignored if the write-behind log was deleted. #12127 +* [CHANGE] Remote-write: Raise default samples per send to 2,000. #12203 +* [FEATURE] Remote-read: Handle native histograms. #12085, #12192 +* [FEATURE] Promtool: Health and readiness check of prometheus server in CLI. #12096 +* [FEATURE] PromQL: Add `query_samples_total` metric, the total number of samples loaded by all queries. #12251 +* [ENHANCEMENT] Scrape: Reduce memory allocations on target labels. #12084 +* [ENHANCEMENT] PromQL: Use faster heap method for `topk()` / `bottomk()`. #12190 +* [ENHANCEMENT] Rules API: Allow filtering by rule name. #12270 +* [ENHANCEMENT] Native Histograms: Various fixes and improvements. #11687, #12264, #12272 +* [ENHANCEMENT] UI: Search of scraping pools is now case-insensitive. #12207 +* [ENHANCEMENT] TSDB: Add an affirmative log message for successful WAL repair. #12135 +* [BUGFIX] TSDB: Block compaction failed when shutting down. #12179 +* [BUGFIX] TSDB: Out-of-order chunks could be ignored if the write-behind log was deleted. #12127 ## 2.43.0 / 2023-03-21 From 6b25e9a923ec5f5a7274a012af4c94f8fb669b9f Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 25 Apr 2023 10:07:26 +0000 Subject: [PATCH 107/632] build: turn on stringlabels by default This setting uses less memory, and was optional in previous release 2.43. 
Signed-off-by: Bryan Boreham --- .promu.yml | 2 ++ CHANGELOG.md | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/.promu.yml b/.promu.yml index ef69c35c8..f724dc34f 100644 --- a/.promu.yml +++ b/.promu.yml @@ -14,8 +14,10 @@ build: all: - netgo - builtinassets + - stringlabels windows: - builtinassets + - stringlabels flags: -a ldflags: | -X github.com/prometheus/common/version.Version={{.Version}} diff --git a/CHANGELOG.md b/CHANGELOG.md index 9daeaae04..8fc24345c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,10 @@ ## 2.44.0-rc.0 / 2023-04-22 +This version is built with Go tag `stringlabels`, to use the smaller data +structure for Labels that was optional in the previous release. For more +details about this code change see #10991. + * [CHANGE] Remote-write: Raise default samples per send to 2,000. #12203 * [FEATURE] Remote-read: Handle native histograms. #12085, #12192 * [FEATURE] Promtool: Health and readiness check of prometheus server in CLI. #12096 From c06c02b3b10b3d98bd7d59400d0a1b3762e90af3 Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Tue, 25 Apr 2023 16:27:39 +0300 Subject: [PATCH 108/632] Fix recommended protoc version Signed-off-by: Paschalis Tsilias --- prompb/README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/prompb/README.md b/prompb/README.md index 8c19b17e9..1b85bc2f3 100644 --- a/prompb/README.md +++ b/prompb/README.md @@ -4,6 +4,8 @@ re-compile them when building Prometheus. If however you have modified the defs and do need to re-compile, run `make proto` from the parent dir. -In order for the script to run, you'll need `protoc` (version 3.12.3) in your -PATH. +In order for the [script][] to run, you'll need `protoc` (version 3.15.8) in +your PATH. 
+ +[script]: ../scripts/genproto.sh From 417e847662e248e2a0213f9ee34cc99326f5f4e0 Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Tue, 25 Apr 2023 17:17:22 +0300 Subject: [PATCH 109/632] Update prompb/README.md Co-authored-by: Julien Pivotto Signed-off-by: Paschalis Tsilias --- prompb/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prompb/README.md b/prompb/README.md index 1b85bc2f3..c7c31399c 100644 --- a/prompb/README.md +++ b/prompb/README.md @@ -4,7 +4,7 @@ re-compile them when building Prometheus. If however you have modified the defs and do need to re-compile, run `make proto` from the parent dir. -In order for the [script][] to run, you'll need `protoc` (version 3.15.8) in +In order for the [script][../scripts/genproto.sh] to run, you'll need `protoc` (version 3.15.8) in your PATH. [script]: ../scripts/genproto.sh From 38de61b59b38b35fc33b1ef8f9a35f394e733f13 Mon Sep 17 00:00:00 2001 From: Jesus Vazquez Date: Tue, 25 Apr 2023 17:58:42 +0200 Subject: [PATCH 110/632] Propose Jesus Vazquez as 2.45 release shepherd Signed-off-by: Jesus Vazquez --- RELEASE.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index 26f72ba62..d7f24dabd 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -49,7 +49,8 @@ Release cadence of first pre-releases being cut is 6 weeks. | v2.42 | 2023-01-25 | Kemal Akkoyun (GitHub: @kakkoyun) | | v2.43 | 2023-03-08 | Julien Pivotto (GitHub: @roidelapluie) | | v2.44 | 2023-04-19 | Bryan Boreham (GitHub: @bboreham) | -| v2.45 | 2023-05-31 | **searching for volunteer** | +| v2.45 | 2023-05-31 | Jesus Vazquez (Github: @jesusvazquez) | +| v2.46 | 2023-07-12 | **searching for volunteer** | If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice. 
From 93f64754c049f05274c4fdbfd20e5054da7c4682 Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Wed, 26 Apr 2023 12:43:19 +0300 Subject: [PATCH 111/632] Remove extra line Signed-off-by: Paschalis Tsilias --- prompb/README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/prompb/README.md b/prompb/README.md index c7c31399c..d71438afb 100644 --- a/prompb/README.md +++ b/prompb/README.md @@ -6,6 +6,3 @@ If however you have modified the defs and do need to re-compile, run In order for the [script][../scripts/genproto.sh] to run, you'll need `protoc` (version 3.15.8) in your PATH. - -[script]: ../scripts/genproto.sh - From 55626c6911e0ceb87920d94e5a15c8cf6c49d962 Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Wed, 26 Apr 2023 12:44:15 +0300 Subject: [PATCH 112/632] Fix final newline Signed-off-by: Paschalis Tsilias --- prompb/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/prompb/README.md b/prompb/README.md index d71438afb..c1acb508a 100644 --- a/prompb/README.md +++ b/prompb/README.md @@ -6,3 +6,4 @@ If however you have modified the defs and do need to re-compile, run In order for the [script][../scripts/genproto.sh] to run, you'll need `protoc` (version 3.15.8) in your PATH. + From 8a34c43515fc0b9452ecf4f238c44a9f249d5da3 Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Wed, 26 Apr 2023 12:57:20 +0300 Subject: [PATCH 113/632] Update prompb/README.md Co-authored-by: Julien Pivotto Signed-off-by: Paschalis Tsilias --- prompb/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prompb/README.md b/prompb/README.md index c1acb508a..a33d7bfb8 100644 --- a/prompb/README.md +++ b/prompb/README.md @@ -4,6 +4,6 @@ re-compile them when building Prometheus. If however you have modified the defs and do need to re-compile, run `make proto` from the parent dir. 
-In order for the [script][../scripts/genproto.sh] to run, you'll need `protoc` (version 3.15.8) in +In order for the [script](../scripts/genproto.sh) to run, you'll need `protoc` (version 3.15.8) in your PATH. From 28f5502828e88ac81a4cc5f1fd00bd4b2091189f Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 26 Apr 2023 10:26:58 -0400 Subject: [PATCH 114/632] scrape: fix two loop variable scoping bugs in test Consider code like: for i := 0; i < numTargets; i++ { stopFuncs = append(stopFuncs, func() { time.Sleep(i*20*time.Millisecond) }) } Because the loop variable i is shared by all closures, all the stopFuncs sleep for numTargets*20 ms. If the i were made per-iteration, as we are considering for a future Go release, the stopFuncs would have sleep durations ranging from 0 to (numTargets-1)*20 ms. Two tests had code like this and were checking that the aggregate sleep was at least numTargets*20 ms ("at least as long as the last target slept"). This is only true today because i == numTarget during all the sleeps. To keep the code working even if the semantics of this loop change, this PR computes d := time.Duration((i+1)*20) * time.Millisecond outside the closure (but inside the loop body), and then each closure has its own d. Now the sleeps range from 20 ms to numTargets*20 ms, keeping the test passing (and probably behaving closer to the intent of the test author). 
The failure being fixed can be reproduced by using the current Go development branch with GOEXPERIMENT=loopvar go test Signed-off-by: Russ Cox --- scrape/scrape_test.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 2a2ca0948..60e335b5b 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -190,8 +190,9 @@ func TestScrapePoolStop(t *testing.T) { labels: labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i)), } l := &testLoop{} + d := time.Duration((i+1)*20) * time.Millisecond l.stopFunc = func() { - time.Sleep(time.Duration(i*20) * time.Millisecond) + time.Sleep(d) mtx.Lock() stopped[t.hash()] = true @@ -273,8 +274,9 @@ func TestScrapePoolReload(t *testing.T) { discoveredLabels: labels, } l := &testLoop{} + d := time.Duration((i+1)*20) * time.Millisecond l.stopFunc = func() { - time.Sleep(time.Duration(i*20) * time.Millisecond) + time.Sleep(d) mtx.Lock() stopped[t.hash()] = true From b1bab7bc54ed06f9a8619e7dd09b7b2ed55d3ee4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Thu, 27 Apr 2023 13:23:52 +0200 Subject: [PATCH 115/632] feat(promtool): add push metrics command MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- cmd/promtool/main.go | 15 +++ cmd/promtool/metrics.go | 234 ++++++++++++++++++++++++++++++++++ docs/command-line/promtool.md | 43 +++++++ 3 files changed, 292 insertions(+) create mode 100644 cmd/promtool/metrics.go diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index de002a0b2..c4077954e 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -178,6 +178,18 @@ func main() { queryLabelsEnd := queryLabelsCmd.Flag("end", "End time (RFC3339 or Unix timestamp).").String() queryLabelsMatch := queryLabelsCmd.Flag("match", "Series selector. 
Can be specified multiple times.").Strings() + pushCmd := app.Command("push", "Push to a Prometheus server.") + pushCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath) + pushMetricsCmd := pushCmd.Command("metrics", "Push metrics to a prometheus remote write.") + pushMetricsCmd.Arg("remote-write-url", "Prometheus remote write url to push metrics.").Required().URLVar(&serverURL) + metricFiles := pushMetricsCmd.Arg( + "metric-files", + "The metric files to push.", + ).Required().ExistingFiles() + metricJobLabel := pushMetricsCmd.Flag("job-label", "Job label to attach to metrics.").Default("promtool").String() + pushMetricsTimeout := pushMetricsCmd.Flag("timeout", "The time to wait for pushing metrics.").Default("30s").Duration() + pushMetricsHeaders := pushMetricsCmd.Flag("header", "Prometheus remote write header.").StringMap() + testCmd := app.Command("test", "Unit testing.") testRulesCmd := testCmd.Command("rules", "Unit tests for rules.") testRulesFiles := testRulesCmd.Arg( @@ -301,6 +313,9 @@ func main() { case checkMetricsCmd.FullCommand(): os.Exit(CheckMetrics(*checkMetricsExtended)) + case pushMetricsCmd.FullCommand(): + os.Exit(PushMetrics(serverURL, httpRoundTripper, *pushMetricsHeaders, *pushMetricsTimeout, *metricJobLabel, *metricFiles...)) + case queryInstantCmd.FullCommand(): os.Exit(QueryInstant(serverURL, httpRoundTripper, *queryInstantExpr, *queryInstantTime, p)) diff --git a/cmd/promtool/metrics.go b/cmd/promtool/metrics.go new file mode 100644 index 000000000..21fcd3e66 --- /dev/null +++ b/cmd/promtool/metrics.go @@ -0,0 +1,234 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "net/url" + "os" + "sort" + "time" + + "github.com/golang/snappy" + dto "github.com/prometheus/client_model/go" + config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/storage/remote" +) + +// Push metrics to a prometheus remote write. +func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, jobLabel string, files ...string) int { + // remote write should respect specification: https://prometheus.io/docs/concepts/remote_write_spec/ + failed := false + + addressURL, err := url.Parse(url.String()) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return failureExitCode + } + + // build remote write client + writeClient, err := remote.NewWriteClient("remote-write", &remote.ClientConfig{ + URL: &config_util.URL{URL: addressURL}, + Timeout: model.Duration(timeout), + }) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return failureExitCode + } + + // set custom tls config from httpConfigFilePath + // set custom headers to every request + client, ok := writeClient.(*remote.Client) + if !ok { + fmt.Fprintln(os.Stderr, fmt.Errorf("unexpected type %T", writeClient)) + return failureExitCode + } + client.Client.Transport = &setHeadersTransport{ + RoundTripper: roundTripper, + headers: headers, + } + + for _, f := range files { + var data []byte + var err error + 
data, err = os.ReadFile(f) + if err != nil { + fmt.Fprintln(os.Stderr, err) + failed = true + continue + } + + fmt.Printf("Parsing metric file %s\n", f) + metricsData, err := parseMetricsTextAndFormat(bytes.NewReader(data), jobLabel) + if err != nil { + fmt.Fprintln(os.Stderr, err) + failed = true + continue + } + + raw, err := metricsData.Marshal() + if err != nil { + fmt.Fprintln(os.Stderr, err) + failed = true + continue + } + + // Encode the request body into snappy encoding. + compressed := snappy.Encode(nil, raw) + err = client.Store(context.Background(), compressed) + if err != nil { + fmt.Fprintln(os.Stderr, err) + failed = true + continue + } + fmt.Printf("Successfully pushed metric file %s\n", f) + } + + if failed { + return failureExitCode + } + + return successExitCode +} + +type setHeadersTransport struct { + http.RoundTripper + headers map[string]string +} + +func (s *setHeadersTransport) RoundTrip(req *http.Request) (*http.Response, error) { + for key, value := range s.headers { + req.Header.Set(key, value) + } + return s.RoundTripper.RoundTrip(req) +} + +var MetricMetadataTypeValue = map[string]int32{ + "UNKNOWN": 0, + "COUNTER": 1, + "GAUGE": 2, + "HISTOGRAM": 3, + "GAUGEHISTOGRAM": 4, + "SUMMARY": 5, + "INFO": 6, + "STATESET": 7, +} + +// formatMetrics convert metric family to a writerequest +func formatMetrics(mf map[string]*dto.MetricFamily, jobLabel string) (*prompb.WriteRequest, error) { + wr := &prompb.WriteRequest{} + + // build metric list + sortedMetricNames := make([]string, 0, len(mf)) + for metric := range mf { + sortedMetricNames = append(sortedMetricNames, metric) + } + // sort metrics name in lexicographical order + sort.Strings(sortedMetricNames) + + for _, metricName := range sortedMetricNames { + // Set metadata writerequest + mtype := MetricMetadataTypeValue[mf[metricName].Type.String()] + metadata := prompb.MetricMetadata{ + MetricFamilyName: mf[metricName].GetName(), + Type: prompb.MetricMetadata_MetricType(mtype), + Help: 
mf[metricName].GetHelp(), + } + wr.Metadata = append(wr.Metadata, metadata) + + for _, metric := range mf[metricName].Metric { + var timeserie prompb.TimeSeries + + // build labels map + labels := make(map[string]string, len(metric.Label)+2) + labels[model.MetricNameLabel] = metricName + labels[model.JobLabel] = jobLabel + + for _, label := range metric.Label { + labelname := label.GetName() + if labelname == model.JobLabel { + labelname = fmt.Sprintf("%s%s", model.ExportedLabelPrefix, labelname) + } + labels[labelname] = label.GetValue() + } + + // build labels name list + sortedLabelNames := make([]string, 0, len(labels)) + for label := range labels { + sortedLabelNames = append(sortedLabelNames, label) + } + // sort labels name in lexicographical order + sort.Strings(sortedLabelNames) + + for _, label := range sortedLabelNames { + timeserie.Labels = append(timeserie.Labels, prompb.Label{ + Name: label, + Value: labels[label], + }) + } + + timeserie.Samples = []prompb.Sample{ + { + Timestamp: time.Now().UnixNano() / int64(time.Millisecond), + Value: getMetricsValue(metric), + }, + } + + wr.Timeseries = append(wr.Timeseries, timeserie) + } + } + return wr, nil +} + +// parseMetricsTextReader consumes an io.Reader and returns the MetricFamily +func parseMetricsTextReader(input io.Reader) (map[string]*dto.MetricFamily, error) { + var parser expfmt.TextParser + mf, err := parser.TextToMetricFamilies(input) + if err != nil { + return nil, err + } + return mf, nil +} + +// getMetricsValue return the value of a timeserie without the need to give value type +func getMetricsValue(m *dto.Metric) float64 { + switch { + case m.Gauge != nil: + return m.GetGauge().GetValue() + case m.Counter != nil: + return m.GetCounter().GetValue() + case m.Untyped != nil: + return m.GetUntyped().GetValue() + default: + return 0. 
+ } +} + +// parseMetricsTextAndFormat return the data in the expected prometheus metrics write request format +func parseMetricsTextAndFormat(input io.Reader, jobLabel string) (*prompb.WriteRequest, error) { + mf, err := parseMetricsTextReader(input) + if err != nil { + return nil, err + } + + return formatMetrics(mf, jobLabel) +} diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index e149d374a..ac159a921 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -27,6 +27,7 @@ Tooling for the Prometheus monitoring system. | check | Check the resources for validity. | | query | Run query against a Prometheus server. | | debug | Fetch debug information. | +| push | Push to a Prometheus server. | | test | Unit testing. | | tsdb | Run tsdb commands. | @@ -372,6 +373,48 @@ Fetch all debug information. +### `promtool push` + +Push to a Prometheus server. + + + +#### Flags + +| Flag | Description | +| --- | --- | +| --http.config.file | HTTP client configuration file for promtool to connect to Prometheus. | + + + + +##### `promtool push metrics` + +Push metrics to a prometheus remote write. + + + +###### Flags + +| Flag | Description | Default | +| --- | --- | --- | +| --job-label | Job label to attach to metrics. | `promtool` | +| --timeout | The time to wait for pushing metrics. | `30s` | +| --header | Prometheus remote write header. | | + + + + +###### Arguments + +| Argument | Description | Required | +| --- | --- | --- | +| remote-write-url | Prometheus remote write url to push metrics. | Yes | +| metric-files | The metric files to push. | Yes | + + + + ### `promtool test` Unit testing. 
From 0d049feac760444cbbafadbe8794843a6f47dba4 Mon Sep 17 00:00:00 2001 From: Filip Petkovski Date: Fri, 28 Apr 2023 22:52:21 +0200 Subject: [PATCH 116/632] Fix encoding samples in ChunkSeries (#12185) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The storage.ChunkSeries iterator assumes that a histogram sample can always be appended to the currently open chunk. This is not the case when there is a counter reset, or when appending a stale sample to a chunk with non-stale samples. In addition, the open chunk sometimes needs to be recoded before a sample can be appended. This commit addresses the issue by implementing a RecodingAppender which can recode incoming samples in a transparent way. It also detects cases when a sample cannot be appended at all and returns `false` so that the caller can open a new chunk. Signed-off-by: Filip Petkovski Signed-off-by: György Krajcsovits Signed-off-by: Ganesh Vernekar Co-authored-by: György Krajcsovits Co-authored-by: Ganesh Vernekar --- storage/series.go | 192 ++++++++++++++++++++++++--- storage/series_test.go | 286 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 461 insertions(+), 17 deletions(-) diff --git a/storage/series.go b/storage/series.go index f609df3f0..5daa6255d 100644 --- a/storage/series.go +++ b/storage/series.go @@ -281,7 +281,7 @@ func NewSeriesToChunkEncoder(series Series) ChunkSeries { func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator { var ( chk chunkenc.Chunk - app chunkenc.Appender + app *RecodingAppender err error ) mint := int64(math.MaxInt64) @@ -299,21 +299,16 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator { for typ := seriesIter.Next(); typ != chunkenc.ValNone; typ = seriesIter.Next() { if typ != lastType || i >= seriesToChunkEncoderSplit { // Create a new chunk if the sample type changed or too many samples in the current one. 
- if chk != nil { - chks = append(chks, chunks.Meta{ - MinTime: mint, - MaxTime: maxt, - Chunk: chk, - }) - } + chks = appendChunk(chks, mint, maxt, chk) chk, err = chunkenc.NewEmptyChunk(typ.ChunkEncoding()) if err != nil { return errChunksIterator{err: err} } - app, err = chk.Appender() + chkAppender, err := chk.Appender() if err != nil { return errChunksIterator{err: err} } + app = NewRecodingAppender(&chk, chkAppender) mint = int64(math.MaxInt64) // maxt is immediately overwritten below which is why setting it here won't make a difference. i = 0 @@ -332,10 +327,45 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator { app.Append(t, v) case chunkenc.ValHistogram: t, h = seriesIter.AtHistogram() - app.AppendHistogram(t, h) + if ok, counterReset := app.AppendHistogram(t, h); !ok { + chks = appendChunk(chks, mint, maxt, chk) + histChunk := chunkenc.NewHistogramChunk() + if counterReset { + histChunk.SetCounterResetHeader(chunkenc.CounterReset) + } + chk = histChunk + + chkAppender, err := chk.Appender() + if err != nil { + return errChunksIterator{err: err} + } + mint = int64(math.MaxInt64) + i = 0 + app = NewRecodingAppender(&chk, chkAppender) + if ok, _ := app.AppendHistogram(t, h); !ok { + panic("unexpected error while appending histogram") + } + } case chunkenc.ValFloatHistogram: t, fh = seriesIter.AtFloatHistogram() - app.AppendFloatHistogram(t, fh) + if ok, counterReset := app.AppendFloatHistogram(t, fh); !ok { + chks = appendChunk(chks, mint, maxt, chk) + floatHistChunk := chunkenc.NewFloatHistogramChunk() + if counterReset { + floatHistChunk.SetCounterResetHeader(chunkenc.CounterReset) + } + chk = floatHistChunk + chkAppender, err := chk.Appender() + if err != nil { + return errChunksIterator{err: err} + } + mint = int64(math.MaxInt64) + i = 0 + app = NewRecodingAppender(&chk, chkAppender) + if ok, _ := app.AppendFloatHistogram(t, fh); !ok { + panic("unexpected error while float appending histogram") + } + } default: return 
errChunksIterator{err: fmt.Errorf("unknown sample type %s", typ.String())} } @@ -350,6 +380,16 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator { return errChunksIterator{err: err} } + chks = appendChunk(chks, mint, maxt, chk) + + if existing { + lcsi.Reset(chks...) + return lcsi + } + return NewListChunkSeriesIterator(chks...) +} + +func appendChunk(chks []chunks.Meta, mint, maxt int64, chk chunkenc.Chunk) []chunks.Meta { if chk != nil { chks = append(chks, chunks.Meta{ MinTime: mint, @@ -357,12 +397,7 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator { Chunk: chk, }) } - - if existing { - lcsi.Reset(chks...) - return lcsi - } - return NewListChunkSeriesIterator(chks...) + return chks } type errChunksIterator struct { @@ -420,3 +455,126 @@ func ExpandChunks(iter chunks.Iterator) ([]chunks.Meta, error) { } return result, iter.Err() } + +// RecodingAppender is a tsdb.Appender that recodes histogram samples if needed during appends. +// It takes an existing appender and a chunk to which samples are appended. +type RecodingAppender struct { + chk *chunkenc.Chunk + app chunkenc.Appender +} + +func NewRecodingAppender(chk *chunkenc.Chunk, app chunkenc.Appender) *RecodingAppender { + return &RecodingAppender{ + chk: chk, + app: app, + } +} + +// Append appends a float sample to the appender. +func (a *RecodingAppender) Append(t int64, v float64) { + a.app.Append(t, v) +} + +// AppendHistogram appends a histogram sample to the underlying chunk. +// The method returns false if the sample cannot be appended and a boolean value set to true +// when it is not appendable because of a counter reset. +// If counterReset is true, okToAppend is always false. 
+func (a *RecodingAppender) AppendHistogram(t int64, h *histogram.Histogram) (okToAppend, counterReset bool) { + app, ok := a.app.(*chunkenc.HistogramAppender) + if !ok { + return false, false + } + + if app.NumSamples() == 0 { + a.app.AppendHistogram(t, h) + return true, false + } + + var ( + pForwardInserts, nForwardInserts []chunkenc.Insert + pBackwardInserts, nBackwardInserts []chunkenc.Insert + pMergedSpans, nMergedSpans []histogram.Span + ) + switch h.CounterResetHint { + case histogram.GaugeType: + pForwardInserts, nForwardInserts, + pBackwardInserts, nBackwardInserts, + pMergedSpans, nMergedSpans, + okToAppend = app.AppendableGauge(h) + default: + pForwardInserts, nForwardInserts, okToAppend, counterReset = app.Appendable(h) + } + if !okToAppend || counterReset { + return false, counterReset + } + + if len(pBackwardInserts)+len(nBackwardInserts) > 0 { + h.PositiveSpans = pMergedSpans + h.NegativeSpans = nMergedSpans + app.RecodeHistogram(h, pBackwardInserts, nBackwardInserts) + } + if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 { + chk, app := app.Recode( + pForwardInserts, nForwardInserts, + h.PositiveSpans, h.NegativeSpans, + ) + *a.chk = chk + a.app = app + } + + a.app.AppendHistogram(t, h) + return true, counterReset +} + +// AppendFloatHistogram appends a float histogram sample to the underlying chunk. +// The method returns false if the sample cannot be appended and a boolean value set to true +// when it is not appendable because of a counter reset. +// If counterReset is true, okToAppend is always false. 
+func (a *RecodingAppender) AppendFloatHistogram(t int64, fh *histogram.FloatHistogram) (okToAppend, counterReset bool) { + app, ok := a.app.(*chunkenc.FloatHistogramAppender) + if !ok { + return false, false + } + + if app.NumSamples() == 0 { + a.app.AppendFloatHistogram(t, fh) + return true, false + } + + var ( + pForwardInserts, nForwardInserts []chunkenc.Insert + pBackwardInserts, nBackwardInserts []chunkenc.Insert + pMergedSpans, nMergedSpans []histogram.Span + ) + switch fh.CounterResetHint { + case histogram.GaugeType: + pForwardInserts, nForwardInserts, + pBackwardInserts, nBackwardInserts, + pMergedSpans, nMergedSpans, + okToAppend = app.AppendableGauge(fh) + default: + pForwardInserts, nForwardInserts, okToAppend, counterReset = app.Appendable(fh) + } + + if !okToAppend || counterReset { + return false, counterReset + } + + if len(pBackwardInserts)+len(nBackwardInserts) > 0 { + fh.PositiveSpans = pMergedSpans + fh.NegativeSpans = nMergedSpans + app.RecodeHistogramm(fh, pBackwardInserts, nBackwardInserts) + } + + if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 { + chunk, app := app.Recode( + pForwardInserts, nForwardInserts, + fh.PositiveSpans, fh.NegativeSpans, + ) + *a.chk = chunk + a.app = app + } + + a.app.AppendFloatHistogram(t, fh) + return true, counterReset +} diff --git a/storage/series_test.go b/storage/series_test.go index 210a68e28..4c318f1a0 100644 --- a/storage/series_test.go +++ b/storage/series_test.go @@ -14,12 +14,17 @@ package storage import ( + "fmt" + "math" "testing" "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/tsdbutil" ) @@ -119,3 +124,284 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) { } } } + +type histogramTest struct { + samples 
[]tsdbutil.Sample + expectedChunks int + expectedCounterReset bool +} + +func TestHistogramSeriesToChunks(t *testing.T) { + h1 := &histogram.Histogram{ + Count: 3, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + }, + PositiveBuckets: []int64{2, 1}, // Abs: 2, 3 + } + // Appendable to h1. + h2 := &histogram.Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, 1, -2, 3}, // Abs: 2, 3, 1, 4 + } + // Implicit counter reset by reduction in buckets, not appendable. + h2down := &histogram.Histogram{ + Count: 8, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 3}, // Abs: 1, 2, 1, 4 + } + + fh1 := &histogram.FloatHistogram{ + Count: 4, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + }, + PositiveBuckets: []float64{3, 1}, + } + // Appendable to fh1. + fh2 := &histogram.FloatHistogram{ + Count: 15, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []float64{4, 2, 7, 2}, + } + // Implicit counter reset by reduction in buckets, not appendable. 
+ fh2down := &histogram.FloatHistogram{ + Count: 13, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []float64{2, 2, 7, 2}, + } + + staleHistogram := &histogram.Histogram{ + Sum: math.Float64frombits(value.StaleNaN), + } + staleFloatHistogram := &histogram.FloatHistogram{ + Sum: math.Float64frombits(value.StaleNaN), + } + + tests := map[string]histogramTest{ + "single histogram to single chunk": { + samples: []tsdbutil.Sample{ + hSample{t: 1, h: h1}, + }, + expectedChunks: 1, + }, + "two histograms encoded to a single chunk": { + samples: []tsdbutil.Sample{ + hSample{t: 1, h: h1}, + hSample{t: 2, h: h2}, + }, + expectedChunks: 1, + }, + "two histograms encoded to two chunks": { + samples: []tsdbutil.Sample{ + hSample{t: 1, h: h2}, + hSample{t: 2, h: h1}, + }, + expectedChunks: 2, + expectedCounterReset: true, + }, + "histogram and stale sample encoded to two chunks": { + samples: []tsdbutil.Sample{ + hSample{t: 1, h: staleHistogram}, + hSample{t: 2, h: h1}, + }, + expectedChunks: 2, + }, + "histogram and reduction in bucket encoded to two chunks": { + samples: []tsdbutil.Sample{ + hSample{t: 1, h: h1}, + hSample{t: 2, h: h2down}, + }, + expectedChunks: 2, + expectedCounterReset: true, + }, + // Float histograms. 
+ "single float histogram to single chunk": { + samples: []tsdbutil.Sample{ + fhSample{t: 1, fh: fh1}, + }, + expectedChunks: 1, + }, + "two float histograms encoded to a single chunk": { + samples: []tsdbutil.Sample{ + fhSample{t: 1, fh: fh1}, + fhSample{t: 2, fh: fh2}, + }, + expectedChunks: 1, + }, + "two float histograms encoded to two chunks": { + samples: []tsdbutil.Sample{ + fhSample{t: 1, fh: fh2}, + fhSample{t: 2, fh: fh1}, + }, + expectedChunks: 2, + expectedCounterReset: true, + }, + "float histogram and stale sample encoded to two chunks": { + samples: []tsdbutil.Sample{ + fhSample{t: 1, fh: staleFloatHistogram}, + fhSample{t: 2, fh: fh1}, + }, + expectedChunks: 2, + }, + "float histogram and reduction in bucket encoded to two chunks": { + samples: []tsdbutil.Sample{ + fhSample{t: 1, fh: fh1}, + fhSample{t: 2, fh: fh2down}, + }, + expectedChunks: 2, + expectedCounterReset: true, + }, + // Mixed. + "histogram and float histogram encoded to two chunks": { + samples: []tsdbutil.Sample{ + hSample{t: 1, h: h1}, + fhSample{t: 2, fh: fh2}, + }, + expectedChunks: 2, + }, + "float histogram and histogram encoded to two chunks": { + samples: []tsdbutil.Sample{ + fhSample{t: 1, fh: fh1}, + hSample{t: 2, h: h2}, + }, + expectedChunks: 2, + }, + "histogram and stale float histogram encoded to two chunks": { + samples: []tsdbutil.Sample{ + hSample{t: 1, h: h1}, + fhSample{t: 2, fh: staleFloatHistogram}, + }, + expectedChunks: 2, + }, + } + + for testName, test := range tests { + t.Run(testName, func(t *testing.T) { + testHistogramsSeriesToChunks(t, test) + }) + } +} + +func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) { + lbs := labels.FromStrings("__name__", "up", "instance", "localhost:8080") + series := NewListSeries(lbs, test.samples) + encoder := NewSeriesToChunkEncoder(series) + require.EqualValues(t, lbs, encoder.Labels()) + + chks, err := ExpandChunks(encoder.Iterator(nil)) + require.NoError(t, err) + require.Equal(t, test.expectedChunks, 
len(chks)) + + // Decode all encoded samples and assert they are equal to the original ones. + encodedSamples := expandHistogramSamples(chks) + require.Equal(t, len(test.samples), len(encodedSamples)) + + for i, s := range test.samples { + switch expectedSample := s.(type) { + case hSample: + encodedSample, ok := encodedSamples[i].(hSample) + require.True(t, ok, "expect histogram", fmt.Sprintf("at idx %d", i)) + // Ignore counter reset here, will check on chunk level. + encodedSample.h.CounterResetHint = histogram.UnknownCounterReset + if value.IsStaleNaN(expectedSample.h.Sum) { + require.True(t, value.IsStaleNaN(encodedSample.h.Sum), fmt.Sprintf("at idx %d", i)) + continue + } + require.Equal(t, *expectedSample.h, *encodedSample.h.Compact(0), fmt.Sprintf("at idx %d", i)) + case fhSample: + encodedSample, ok := encodedSamples[i].(fhSample) + require.True(t, ok, "expect float histogram", fmt.Sprintf("at idx %d", i)) + // Ignore counter reset here, will check on chunk level. + encodedSample.fh.CounterResetHint = histogram.UnknownCounterReset + if value.IsStaleNaN(expectedSample.fh.Sum) { + require.True(t, value.IsStaleNaN(encodedSample.fh.Sum), fmt.Sprintf("at idx %d", i)) + continue + } + require.Equal(t, *expectedSample.fh, *encodedSample.fh.Compact(0), fmt.Sprintf("at idx %d", i)) + default: + t.Error("internal error, unexpected type") + } + } + + // If a counter reset hint is expected, it can only be found in the second chunk. + // Otherwise, we assert an unknown counter reset hint in all chunks. 
+ if test.expectedCounterReset { + require.Equal(t, chunkenc.UnknownCounterReset, getCounterResetHint(chks[0])) + require.Equal(t, chunkenc.CounterReset, getCounterResetHint(chks[1])) + } else { + for _, chk := range chks { + require.Equal(t, chunkenc.UnknownCounterReset, getCounterResetHint(chk)) + } + } +} + +func expandHistogramSamples(chunks []chunks.Meta) (result []tsdbutil.Sample) { + if len(chunks) == 0 { + return + } + + for _, chunk := range chunks { + it := chunk.Chunk.Iterator(nil) + for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() { + switch vt { + case chunkenc.ValHistogram: + t, h := it.AtHistogram() + result = append(result, hSample{t: t, h: h}) + case chunkenc.ValFloatHistogram: + t, fh := it.AtFloatHistogram() + result = append(result, fhSample{t: t, fh: fh}) + default: + panic("unexpected value type") + } + } + } + return +} + +func getCounterResetHint(chunk chunks.Meta) chunkenc.CounterResetHeader { + switch chk := chunk.Chunk.(type) { + case *chunkenc.HistogramChunk: + return chk.GetCounterResetHeader() + case *chunkenc.FloatHistogramChunk: + return chk.GetCounterResetHeader() + } + return chunkenc.UnknownCounterReset +} From 0ab95536115adfe50af249d36d73674be694ca3f Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 1 May 2023 16:43:15 +0100 Subject: [PATCH 117/632] tsdb: drop deleted series from the WAL sooner (#12297) `head.deleted` holds the WAL segment in use at the time each series was removed from the head. At the end of `truncateWAL()` we will delete all segments up to `last`, so we can drop any series that were last seen in a segment at or before that point. 
(same change in Prometheus Agent too) Signed-off-by: Bryan Boreham --- tsdb/agent/db.go | 4 ++-- tsdb/head.go | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 3343ee18e..13cad6bfc 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -665,7 +665,7 @@ func (db *DB) truncate(mint int64) error { } seg, ok := db.deleted[id] - return ok && seg >= first + return ok && seg > last } db.metrics.checkpointCreationTotal.Inc() @@ -687,7 +687,7 @@ func (db *DB) truncate(mint int64) error { // The checkpoint is written and segments before it are truncated, so we // no longer need to track deleted series that were being kept around. for ref, segment := range db.deleted { - if segment < first { + if segment <= last { delete(db.deleted, ref) } } diff --git a/tsdb/head.go b/tsdb/head.go index 5bd5bbccc..f839adb72 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -1212,9 +1212,9 @@ func (h *Head) truncateWAL(mint int64) error { return true } h.deletedMtx.Lock() - _, ok := h.deleted[id] + keepUntil, ok := h.deleted[id] h.deletedMtx.Unlock() - return ok + return ok && keepUntil > last } h.metrics.checkpointCreationTotal.Inc() if _, err = wlog.Checkpoint(h.logger, h.wal, first, last, keep, mint); err != nil { @@ -1235,7 +1235,7 @@ func (h *Head) truncateWAL(mint int64) error { // longer need to track deleted series that are before it. h.deletedMtx.Lock() for ref, segment := range h.deleted { - if segment < first { + if segment <= last { delete(h.deleted, ref) } } From 1068be199149c8c1ef3bcef66cfdf6c3bdcf1fdc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 May 2023 23:57:09 +0000 Subject: [PATCH 118/632] build(deps): bump bufbuild/buf-setup-action from 1.16.0 to 1.17.0 Bumps [bufbuild/buf-setup-action](https://github.com/bufbuild/buf-setup-action) from 1.16.0 to 1.17.0. 
- [Release notes](https://github.com/bufbuild/buf-setup-action/releases) - [Commits](https://github.com/bufbuild/buf-setup-action/compare/v1.16.0...v1.17.0) --- updated-dependencies: - dependency-name: bufbuild/buf-setup-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/buf-lint.yml | 2 +- .github/workflows/buf.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index 3275d08fc..79430ee56 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.16.0 + - uses: bufbuild/buf-setup-action@v1.17.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@v1 diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index c80183b18..06e53172e 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -9,7 +9,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.16.0 + - uses: bufbuild/buf-setup-action@v1.17.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@v1 From 1cbe1d85666285ffd84cf776d52e00f2ac73c59c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 May 2023 23:58:14 +0000 Subject: [PATCH 119/632] build(deps): bump github.com/digitalocean/godo from 1.98.0 to 1.99.0 Bumps [github.com/digitalocean/godo](https://github.com/digitalocean/godo) from 1.98.0 to 1.99.0. 
- [Release notes](https://github.com/digitalocean/godo/releases) - [Changelog](https://github.com/digitalocean/godo/blob/main/CHANGELOG.md) - [Commits](https://github.com/digitalocean/godo/compare/v1.98.0...v1.99.0) --- updated-dependencies: - dependency-name: github.com/digitalocean/godo dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7b309b03f..f4f22239d 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/aws/aws-sdk-go v1.44.245 github.com/cespare/xxhash/v2 v2.2.0 github.com/dennwc/varint v1.0.0 - github.com/digitalocean/godo v1.98.0 + github.com/digitalocean/godo v1.99.0 github.com/docker/docker v23.0.4+incompatible github.com/edsrzf/mmap-go v1.1.0 github.com/envoyproxy/go-control-plane v0.11.0 diff --git a/go.sum b/go.sum index c699aa9df..753cc2ea1 100644 --- a/go.sum +++ b/go.sum @@ -149,8 +149,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/digitalocean/godo v1.98.0 h1:potyC1eD0N9n5/P4/WmJuKgg+OGYZOBWEW+/aKTX6QQ= -github.com/digitalocean/godo v1.98.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA= +github.com/digitalocean/godo v1.99.0 h1:gUHO7n9bDaZFWvbzOum4bXE0/09ZuYA9yA8idQHX57E= +github.com/digitalocean/godo v1.99.0/go.mod h1:SsS2oXo2rznfM/nORlZ/6JaUJZFhmKTib1YhopUc8NA= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod 
h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= From 6dfcd002ae3765be1099e1eecc1c98b15aa69acb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 May 2023 23:58:22 +0000 Subject: [PATCH 120/632] build(deps): bump github.com/hetznercloud/hcloud-go Bumps [github.com/hetznercloud/hcloud-go](https://github.com/hetznercloud/hcloud-go) from 1.42.0 to 1.43.0. - [Release notes](https://github.com/hetznercloud/hcloud-go/releases) - [Changelog](https://github.com/hetznercloud/hcloud-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/hetznercloud/hcloud-go/compare/v1.42.0...v1.43.0) --- updated-dependencies: - dependency-name: github.com/hetznercloud/hcloud-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7b309b03f..240e81afd 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/consul/api v1.20.0 github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 - github.com/hetznercloud/hcloud-go v1.42.0 + github.com/hetznercloud/hcloud-go v1.43.0 github.com/ionos-cloud/sdk-go/v6 v6.1.6 github.com/json-iterator/go v1.1.12 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b diff --git a/go.sum b/go.sum index c699aa9df..b44b5f4cb 100644 --- a/go.sum +++ b/go.sum @@ -456,8 +456,8 @@ github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197/go.mod h1:2TCr github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go v1.42.0 h1:Es/CDOForQN3nOOP5Vxh1N/YHjpCg386iYEX5zCgi+A= -github.com/hetznercloud/hcloud-go 
v1.42.0/go.mod h1:YADL8AbmQYH0Eo+1lkuyoc8LutT0UeMvaKP47nNUb+Y= +github.com/hetznercloud/hcloud-go v1.43.0 h1:m4p5+mz32Tt+bHkNQEg9RQdtMIu+SUdMjs29LsOJjUk= +github.com/hetznercloud/hcloud-go v1.43.0/go.mod h1:DPs7Dvae8LrTVOWyq2abwQQOwfpfICAzKHm2HQMU5/E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= From 8deface2da9c234e69de99e4d76446b4ee09acea Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 May 2023 00:00:22 +0000 Subject: [PATCH 121/632] build(deps): bump github.com/prometheus/client_golang Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.14.0 to 1.15.0. - [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v1.14.0...v1.15.0) --- updated-dependencies: - dependency-name: github.com/prometheus/client_golang dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 6 +++--- documentation/examples/remote_storage/go.sum | 14 +++++--------- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 5e0e6ddb1..1db3e9a9e 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -8,7 +8,7 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 github.com/influxdata/influxdb v1.11.0 - github.com/prometheus/client_golang v1.14.0 + github.com/prometheus/client_golang v1.15.0 github.com/prometheus/common v0.42.0 github.com/prometheus/prometheus v0.43.0 github.com/stretchr/testify v1.8.2 @@ -29,7 +29,7 @@ require ( github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect - github.com/kr/pretty v0.3.0 // indirect + github.com/kr/text v0.2.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/pkg/errors v0.9.1 // indirect @@ -51,7 +51,7 @@ require ( golang.org/x/text v0.8.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.29.0 // indirect + google.golang.org/protobuf v1.30.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 11c294515..c4350e78b 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -141,8 +141,7 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -181,8 +180,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM= +github.com/prometheus/client_golang v1.15.0/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -204,7 +203,6 @@ github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJf github.com/prometheus/procfs v0.9.0/go.mod 
h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/prometheus/prometheus v0.43.0 h1:18iCSfrbAHbXvYFvR38U1Pt4uZmU9SmDcCpCrBKUiGg= github.com/prometheus/prometheus v0.43.0/go.mod h1:2BA14LgBeqlPuzObSEbh+Y+JwLH2GcqDlJKbF2sA6FM= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 h1:yFl3jyaSVLNYXlnNYM5z2pagEk1dYQhfr1p20T1NyKY= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -341,14 +339,12 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.29.0 h1:44S3JjaKmLEE4YIkjzexaP+NzZsudE3Zin5Njn/pYX0= -google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 
h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= From 6985dcbe73561081e2e47541db99dfa1f8bd8831 Mon Sep 17 00:00:00 2001 From: Justin Lei Date: Mon, 10 Apr 2023 13:07:43 -0700 Subject: [PATCH 122/632] Optimize and test MemoizedSeriesIterator Signed-off-by: Justin Lei --- promql/engine.go | 2 +- storage/memoized_iterator.go | 26 +++++----- storage/memoized_iterator_test.go | 82 +++++++++++++++++++++++-------- 3 files changed, 75 insertions(+), 35 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index cbeeb82a1..688048e7a 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1857,7 +1857,7 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, no } if valueType == chunkenc.ValNone || t > refTime { var ok bool - t, v, _, h, ok = it.PeekPrev() + t, v, h, ok = it.PeekPrev() if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) { return 0, 0, nil, false } diff --git a/storage/memoized_iterator.go b/storage/memoized_iterator.go index 88eee0756..465afa91c 100644 --- a/storage/memoized_iterator.go +++ b/storage/memoized_iterator.go @@ -31,12 +31,7 @@ type MemoizedSeriesIterator struct { // Keep track of the previously returned value. prevTime int64 prevValue float64 - prevHistogram *histogram.Histogram prevFloatHistogram *histogram.FloatHistogram - // TODO(beorn7): MemoizedSeriesIterator is currently only used by the - // PromQL engine, which only works with FloatHistograms. For better - // performance, we could change MemoizedSeriesIterator to also only - // handle FloatHistograms. } // NewMemoizedEmptyIterator is like NewMemoizedIterator but it's initialised with an empty iterator. @@ -66,11 +61,11 @@ func (b *MemoizedSeriesIterator) Reset(it chunkenc.Iterator) { // PeekPrev returns the previous element of the iterator. If there is none buffered, // ok is false. 
-func (b *MemoizedSeriesIterator) PeekPrev() (t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, ok bool) { +func (b *MemoizedSeriesIterator) PeekPrev() (t int64, v float64, fh *histogram.FloatHistogram, ok bool) { if b.prevTime == math.MinInt64 { - return 0, 0, nil, nil, false + return 0, 0, nil, false } - return b.prevTime, b.prevValue, b.prevHistogram, b.prevFloatHistogram, true + return b.prevTime, b.prevValue, b.prevFloatHistogram, true } // Seek advances the iterator to the element at time t or greater. @@ -108,15 +103,14 @@ func (b *MemoizedSeriesIterator) Next() chunkenc.ValueType { return chunkenc.ValNone case chunkenc.ValFloat: b.prevTime, b.prevValue = b.it.At() - b.prevHistogram = nil b.prevFloatHistogram = nil case chunkenc.ValHistogram: b.prevValue = 0 - b.prevTime, b.prevHistogram = b.it.AtHistogram() - _, b.prevFloatHistogram = b.it.AtFloatHistogram() + ts, h := b.it.AtHistogram() + b.prevTime = ts + b.prevFloatHistogram = h.ToFloat() case chunkenc.ValFloatHistogram: b.prevValue = 0 - b.prevHistogram = nil b.prevTime, b.prevFloatHistogram = b.it.AtFloatHistogram() } @@ -132,13 +126,17 @@ func (b *MemoizedSeriesIterator) At() (int64, float64) { return b.it.At() } -// AtHistogram returns the current histogram element of the iterator. +// AtHistogram is not supported by this iterator. func (b *MemoizedSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { - return b.it.AtHistogram() + panic("AtHistogram is not supported by this iterator.") } // AtFloatHistogram returns the current float-histogram element of the iterator. 
func (b *MemoizedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { + if b.valueType == chunkenc.ValHistogram { + ts, h := b.it.AtHistogram() + return ts, h.ToFloat() + } return b.it.AtFloatHistogram() } diff --git a/storage/memoized_iterator_test.go b/storage/memoized_iterator_test.go index d996436e0..f922aaeec 100644 --- a/storage/memoized_iterator_test.go +++ b/storage/memoized_iterator_test.go @@ -16,25 +16,36 @@ package storage import ( "testing" - "github.com/stretchr/testify/require" - + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/tsdbutil" + + "github.com/stretchr/testify/require" ) func TestMemoizedSeriesIterator(t *testing.T) { - // TODO(beorn7): Include histograms in testing. var it *MemoizedSeriesIterator - sampleEq := func(ets int64, ev float64) { - ts, v := it.At() - require.Equal(t, ets, ts, "timestamp mismatch") - require.Equal(t, ev, v, "value mismatch") + sampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram) { + if efh == nil { + ts, v := it.At() + require.Equal(t, ets, ts, "timestamp mismatch") + require.Equal(t, ev, v, "value mismatch") + } else { + ts, fh := it.AtFloatHistogram() + require.Equal(t, ets, ts, "timestamp mismatch") + require.Equal(t, efh, fh, "histogram mismatch") + } } - prevSampleEq := func(ets int64, ev float64, eok bool) { - ts, v, _, _, ok := it.PeekPrev() + prevSampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram, eok bool) { + ts, v, fh, ok := it.PeekPrev() require.Equal(t, eok, ok, "exist mismatch") require.Equal(t, ets, ts, "timestamp mismatch") - require.Equal(t, ev, v, "value mismatch") + if efh == nil { + require.Equal(t, ev, v, "value mismatch") + } else { + require.Equal(t, efh, fh, "histogram mismatch") + } } it = NewMemoizedIterator(NewListSeriesIterator(samples{ @@ -46,29 +57,60 @@ func TestMemoizedSeriesIterator(t *testing.T) { fSample{t: 99, f: 8}, fSample{t: 
100, f: 9}, fSample{t: 101, f: 10}, + hSample{t: 102, h: tsdbutil.GenerateTestHistogram(0)}, + hSample{t: 103, h: tsdbutil.GenerateTestHistogram(1)}, + fhSample{t: 104, fh: tsdbutil.GenerateTestFloatHistogram(2)}, + fhSample{t: 199, fh: tsdbutil.GenerateTestFloatHistogram(3)}, + hSample{t: 200, h: tsdbutil.GenerateTestHistogram(4)}, + fhSample{t: 299, fh: tsdbutil.GenerateTestFloatHistogram(5)}, + fSample{t: 300, f: 11}, }), 2) require.Equal(t, it.Seek(-123), chunkenc.ValFloat, "seek failed") - sampleEq(1, 2) - prevSampleEq(0, 0, false) + sampleEq(1, 2, nil) + prevSampleEq(0, 0, nil, false) require.Equal(t, it.Next(), chunkenc.ValFloat, "next failed") - sampleEq(2, 3) - prevSampleEq(1, 2, true) + sampleEq(2, 3, nil) + prevSampleEq(1, 2, nil, true) require.Equal(t, it.Next(), chunkenc.ValFloat, "next failed") require.Equal(t, it.Next(), chunkenc.ValFloat, "next failed") require.Equal(t, it.Next(), chunkenc.ValFloat, "next failed") - sampleEq(5, 6) - prevSampleEq(4, 5, true) + sampleEq(5, 6, nil) + prevSampleEq(4, 5, nil, true) require.Equal(t, it.Seek(5), chunkenc.ValFloat, "seek failed") - sampleEq(5, 6) - prevSampleEq(4, 5, true) + sampleEq(5, 6, nil) + prevSampleEq(4, 5, nil, true) require.Equal(t, it.Seek(101), chunkenc.ValFloat, "seek failed") - sampleEq(101, 10) - prevSampleEq(100, 9, true) + sampleEq(101, 10, nil) + prevSampleEq(100, 9, nil, true) + + require.Equal(t, chunkenc.ValHistogram, it.Next(), "next failed") + sampleEq(102, 0, tsdbutil.GenerateTestFloatHistogram(0)) + prevSampleEq(101, 10, nil, true) + + require.Equal(t, chunkenc.ValHistogram, it.Next(), "next failed") + sampleEq(103, 0, tsdbutil.GenerateTestFloatHistogram(1)) + prevSampleEq(102, 0, tsdbutil.GenerateTestFloatHistogram(0), true) + + require.Equal(t, chunkenc.ValFloatHistogram, it.Seek(104), "seek failed") + sampleEq(104, 0, tsdbutil.GenerateTestFloatHistogram(2)) + prevSampleEq(103, 0, tsdbutil.GenerateTestFloatHistogram(1), true) + + require.Equal(t, chunkenc.ValFloatHistogram, 
it.Next(), "next failed") + sampleEq(199, 0, tsdbutil.GenerateTestFloatHistogram(3)) + prevSampleEq(104, 0, tsdbutil.GenerateTestFloatHistogram(2), true) + + require.Equal(t, chunkenc.ValFloatHistogram, it.Seek(280), "seek failed") + sampleEq(299, 0, tsdbutil.GenerateTestFloatHistogram(5)) + prevSampleEq(0, 0, nil, false) + + require.Equal(t, chunkenc.ValFloat, it.Next(), "next failed") + sampleEq(300, 11, nil) + prevSampleEq(299, 0, tsdbutil.GenerateTestFloatHistogram(5), true) require.Equal(t, it.Next(), chunkenc.ValNone, "next succeeded unexpectedly") require.Equal(t, it.Seek(1024), chunkenc.ValNone, "seek succeeded unexpectedly") From 7a48a266b675d7531234c66b54f31e7c71bcd341 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 3 May 2023 11:59:27 +0100 Subject: [PATCH 123/632] labels: respect Set after Del in Builder (#12322) * labels: respect Set after Del in Builder The implementations are not symmetric between `Set()` and `Del()`, so we must be careful. Add tests for this, both in labels and in relabel where the issue was reported. Also make the slice implementation consistent re `slices.Contains`. Signed-off-by: Bryan Boreham --- model/labels/labels.go | 9 ++++----- model/labels/labels_string.go | 7 ++++--- model/labels/labels_test.go | 7 +++++++ model/relabel/relabel_test.go | 28 ++++++++++++++++++++++++++++ 4 files changed, 43 insertions(+), 8 deletions(-) diff --git a/model/labels/labels.go b/model/labels/labels.go index 93524ddcf..9ac0e5b53 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -533,16 +533,15 @@ func (b *Builder) Set(n, v string) *Builder { } func (b *Builder) Get(n string) string { - for _, d := range b.del { - if d == n { - return "" - } - } + // Del() removes entries from .add but Set() does not remove from .del, so check .add first. 
for _, a := range b.add { if a.Name == n { return a.Value } } + if slices.Contains(b.del, n) { + return "" + } return b.base.Get(n) } diff --git a/model/labels/labels_string.go b/model/labels/labels_string.go index ff46103eb..6d54e98ab 100644 --- a/model/labels/labels_string.go +++ b/model/labels/labels_string.go @@ -593,14 +593,15 @@ func (b *Builder) Set(n, v string) *Builder { } func (b *Builder) Get(n string) string { - if slices.Contains(b.del, n) { - return "" - } + // Del() removes entries from .add but Set() does not remove from .del, so check .add first. for _, a := range b.add { if a.Name == n { return a.Value } } + if slices.Contains(b.del, n) { + return "" + } return b.base.Get(n) } diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index 9e60c2251..108d8b0de 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -607,6 +607,13 @@ func TestBuilder(t *testing.T) { require.Equal(t, tcase.want.BytesWithoutLabels(nil, "aaa", "bbb"), b.Labels().Bytes(nil)) }) } + t.Run("set_after_del", func(t *testing.T) { + b := NewBuilder(FromStrings("aaa", "111")) + b.Del("bbb") + b.Set("bbb", "222") + require.Equal(t, FromStrings("aaa", "111", "bbb", "222"), b.Labels()) + require.Equal(t, "222", b.Get("bbb")) + }) } func TestScratchBuilder(t *testing.T) { diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index d277d778d..b50ff4010 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -397,6 +397,34 @@ func TestRelabel(t *testing.T) { "foo": "bar", }), }, + { // From https://github.com/prometheus/prometheus/issues/12283 + input: labels.FromMap(map[string]string{ + "__meta_kubernetes_pod_container_port_name": "foo", + "__meta_kubernetes_pod_annotation_XXX_metrics_port": "9091", + }), + relabel: []*Config{ + { + Regex: MustNewRegexp("^__meta_kubernetes_pod_container_port_name$"), + Action: LabelDrop, + }, + { + SourceLabels: 
model.LabelNames{"__meta_kubernetes_pod_annotation_XXX_metrics_port"}, + Regex: MustNewRegexp("(.+)"), + Action: Replace, + Replacement: "metrics", + TargetLabel: "__meta_kubernetes_pod_container_port_name", + }, + { + SourceLabels: model.LabelNames{"__meta_kubernetes_pod_container_port_name"}, + Regex: MustNewRegexp("^metrics$"), + Action: Keep, + }, + }, + output: labels.FromMap(map[string]string{ + "__meta_kubernetes_pod_annotation_XXX_metrics_port": "9091", + "__meta_kubernetes_pod_container_port_name": "metrics", + }), + }, { input: labels.FromMap(map[string]string{ "a": "foo", From 3d26faade4069b70b5a5cc3a53b56c7654c38a49 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 3 May 2023 16:18:28 +0100 Subject: [PATCH 124/632] Create 2.44.0-rc.1 (#12323) Signed-off-by: Bryan Boreham --- CHANGELOG.md | 4 ++++ VERSION | 2 +- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/react-app/package.json | 4 ++-- 6 files changed, 17 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8fc24345c..06fb01ad0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # Changelog +## 2.44.0-rc.1 / 2023-05-03 + +* [BUGFIX] Labels: Set after Del would be ignored, which broke some relabeling rules. 
#12322 + ## 2.44.0-rc.0 / 2023-04-22 This version is built with Go tag `stringlabels`, to use the smaller data diff --git a/VERSION b/VERSION index a4494d974..662635da4 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.44.0-rc.0 +2.44.0-rc.1 diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 6521c8729..11fdeee45 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.44.0-rc.0", + "version": "0.44.0-rc.1", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.44.0-rc.0", + "@prometheus-io/lezer-promql": "0.44.0-rc.1", "lru-cache": "^6.0.0" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index be92ac46f..4b3f29b52 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.44.0-rc.0", + "version": "0.44.0-rc.1", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 09a4fd3fe..37804eebf 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -28,10 +28,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.44.0-rc.0", + "version": "0.44.0-rc.1", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.44.0-rc.0", + "@prometheus-io/lezer-promql": "0.44.0-rc.1", "lru-cache": "^6.0.0" }, "devDependencies": { @@ -61,7 +61,7 @@ }, "module/lezer-promql": { "name": 
"@prometheus-io/lezer-promql", - "version": "0.44.0-rc.0", + "version": "0.44.0-rc.1", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.2.2", @@ -20763,7 +20763,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.44.0-rc.0", + "version": "0.44.0-rc.1", "dependencies": { "@codemirror/autocomplete": "^6.4.0", "@codemirror/commands": "^6.2.0", @@ -20781,7 +20781,7 @@ "@lezer/lr": "^1.3.1", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.44.0-rc.0", + "@prometheus-io/codemirror-promql": "0.44.0-rc.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.2.0", @@ -23417,7 +23417,7 @@ "@lezer/lr": "^1.3.1", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.44.0-rc.0", + "@prometheus-io/codemirror-promql": "0.44.0-rc.1", "@testing-library/react-hooks": "^7.0.2", "@types/enzyme": "^3.10.12", "@types/flot": "0.0.32", @@ -23468,7 +23468,7 @@ "@lezer/common": "^1.0.2", "@lezer/highlight": "^1.1.3", "@lezer/lr": "^1.3.1", - "@prometheus-io/lezer-promql": "0.44.0-rc.0", + "@prometheus-io/lezer-promql": "0.44.0-rc.1", "@types/lru-cache": "^5.1.1", "isomorphic-fetch": "^3.0.0", "lru-cache": "^6.0.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index b4929d664..57e4d8120 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.44.0-rc.0", + "version": "0.44.0-rc.1", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.4.0", @@ -19,7 +19,7 @@ "@lezer/common": "^1.0.2", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.44.0-rc.0", + "@prometheus-io/codemirror-promql": "0.44.0-rc.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.2.0", From b0272255b735ec5bbd04f0bc26c157febd7678a2 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 3 May 2023 
20:06:12 +0200 Subject: [PATCH 125/632] storage: optimise sampleRing Replace many checks for the lengths of slices with a single tracking variable. Signed-off-by: beorn7 --- storage/buffer.go | 164 ++++++++++++++++++++++++++++------------------ 1 file changed, 99 insertions(+), 65 deletions(-) diff --git a/storage/buffer.go b/storage/buffer.go index 2229e5259..38f559103 100644 --- a/storage/buffer.go +++ b/storage/buffer.go @@ -242,15 +242,16 @@ func (s fhSample) Type() chunkenc.ValueType { type sampleRing struct { delta int64 - // Lookback buffers. We use buf for mixed samples, but one of the three + // Lookback buffers. We use iBuf for mixed samples, but one of the three // concrete ones for homogenous samples. (Only one of the four bufs is // allowed to be populated!) This avoids the overhead of the interface // wrapper for the happy (and by far most common) case of homogenous // samples. - buf []tsdbutil.Sample - fBuf []fSample - hBuf []hSample - fhBuf []fhSample + iBuf []tsdbutil.Sample + fBuf []fSample + hBuf []hSample + fhBuf []fhSample + bufInUse bufType i int // Position of most recent element in ring buffer. f int // Position of first element in ring buffer. @@ -259,6 +260,16 @@ type sampleRing struct { it sampleRingIterator } +type bufType int + +const ( + noBuf bufType = iota // Nothing yet stored in sampleRing. + iBuf + fBuf + hBuf + fhBuf +) + // newSampleRing creates a new sampleRing. If you do not know the prefereed // value type yet, use a size of 0 (in which case the provided typ doesn't // matter). 
On the first add, a buffer of size 16 will be allocated with the @@ -278,7 +289,7 @@ func newSampleRing(delta int64, size int, typ chunkenc.ValueType) *sampleRing { case chunkenc.ValFloatHistogram: r.fhBuf = make([]fhSample, size) default: - r.buf = make([]tsdbutil.Sample, size) + r.iBuf = make([]tsdbutil.Sample, size) } return r } @@ -287,6 +298,7 @@ func (r *sampleRing) reset() { r.l = 0 r.i = -1 r.f = 0 + r.bufInUse = noBuf } // Returns the current iterator. Invalidates previously returned iterators. @@ -310,18 +322,18 @@ func (it *sampleRingIterator) Next() chunkenc.ValueType { if it.i >= it.r.l { return chunkenc.ValNone } - switch { - case len(it.r.fBuf) > 0: + switch it.r.bufInUse { + case fBuf: s := it.r.atF(it.i) it.t = s.t it.f = s.f return chunkenc.ValFloat - case len(it.r.hBuf) > 0: + case hBuf: s := it.r.atH(it.i) it.t = s.t it.h = s.h return chunkenc.ValHistogram - case len(it.r.fhBuf) > 0: + case fhBuf: s := it.r.atFH(it.i) it.t = s.t it.fh = s.fh @@ -372,8 +384,8 @@ func (it *sampleRingIterator) AtT() int64 { } func (r *sampleRing) at(i int) tsdbutil.Sample { - j := (r.f + i) % len(r.buf) - return r.buf[j] + j := (r.f + i) % len(r.iBuf) + return r.iBuf[j] } func (r *sampleRing) atF(i int) fSample { @@ -397,91 +409,113 @@ func (r *sampleRing) atFH(i int) fhSample { // from this package (fSample, hSample, fhSample), call one of the specialized // methods addF, addH, or addFH for better performance. func (r *sampleRing) add(s tsdbutil.Sample) { - if len(r.buf) == 0 { + if r.bufInUse == noBuf { + // First sample. + switch s := s.(type) { + case fSample: + r.bufInUse = fBuf + r.fBuf = addF(s, r.fBuf, r) + case hSample: + r.bufInUse = hBuf + r.hBuf = addH(s, r.hBuf, r) + case fhSample: + r.bufInUse = fhBuf + r.fhBuf = addFH(s, r.fhBuf, r) + } + return + } + if r.bufInUse != iBuf { // Nothing added to the interface buf yet. Let's check if we can // stay specialized. 
switch s := s.(type) { case fSample: - if len(r.hBuf)+len(r.fhBuf) == 0 { + if r.bufInUse == fBuf { r.fBuf = addF(s, r.fBuf, r) return } case hSample: - if len(r.fBuf)+len(r.fhBuf) == 0 { + if r.bufInUse == hBuf { r.hBuf = addH(s, r.hBuf, r) return } case fhSample: - if len(r.fBuf)+len(r.hBuf) == 0 { + if r.bufInUse == fhBuf { r.fhBuf = addFH(s, r.fhBuf, r) return } } // The new sample isn't a fit for the already existing // ones. Copy the latter into the interface buffer where needed. - switch { - case len(r.fBuf) > 0: + switch r.bufInUse { + case fBuf: for _, s := range r.fBuf { - r.buf = append(r.buf, s) + r.iBuf = append(r.iBuf, s) } r.fBuf = nil - case len(r.hBuf) > 0: + case hBuf: for _, s := range r.hBuf { - r.buf = append(r.buf, s) + r.iBuf = append(r.iBuf, s) } r.hBuf = nil - case len(r.fhBuf) > 0: + case fhBuf: for _, s := range r.fhBuf { - r.buf = append(r.buf, s) + r.iBuf = append(r.iBuf, s) } r.fhBuf = nil } + r.bufInUse = iBuf } - r.buf = addSample(s, r.buf, r) + r.iBuf = addSample(s, r.iBuf, r) } // addF is a version of the add method specialized for fSample. func (r *sampleRing) addF(s fSample) { - switch { - case len(r.buf) > 0: - // Already have interface samples. Add to the interface buf. - r.buf = addSample(s, r.buf, r) - case len(r.hBuf)+len(r.fhBuf) > 0: + switch r.bufInUse { + case fBuf: // Add to existing fSamples. + r.fBuf = addF(s, r.fBuf, r) + case noBuf: // Add first sample. + r.fBuf = addF(s, r.fBuf, r) + r.bufInUse = fBuf + case iBuf: // Already have interface samples. Add to the interface buf. + r.iBuf = addSample(s, r.iBuf, r) + default: // Already have specialized samples that are not fSamples. // Need to call the checked add method for conversion. r.add(s) - default: - r.fBuf = addF(s, r.fBuf, r) } } // addH is a version of the add method specialized for hSample. func (r *sampleRing) addH(s hSample) { - switch { - case len(r.buf) > 0: - // Already have interface samples. Add to the interface buf. 
- r.buf = addSample(s, r.buf, r) - case len(r.fBuf)+len(r.fhBuf) > 0: - // Already have samples that are not hSamples. + switch r.bufInUse { + case hBuf: // Add to existing hSamples. + r.hBuf = addH(s, r.hBuf, r) + case noBuf: // Add first sample. + r.hBuf = addH(s, r.hBuf, r) + r.bufInUse = hBuf + case iBuf: // Already have interface samples. Add to the interface buf. + r.iBuf = addSample(s, r.iBuf, r) + default: + // Already have specialized samples that are not hSamples. // Need to call the checked add method for conversion. r.add(s) - default: - r.hBuf = addH(s, r.hBuf, r) } } // addFH is a version of the add method specialized for fhSample. func (r *sampleRing) addFH(s fhSample) { - switch { - case len(r.buf) > 0: - // Already have interface samples. Add to the interface buf. - r.buf = addSample(s, r.buf, r) - case len(r.fBuf)+len(r.hBuf) > 0: - // Already have samples that are not fhSamples. + switch r.bufInUse { + case fhBuf: // Add to existing fhSamples. + r.fhBuf = addFH(s, r.fhBuf, r) + case noBuf: // Add first sample. + r.fhBuf = addFH(s, r.fhBuf, r) + r.bufInUse = fhBuf + case iBuf: // Already have interface samples. Add to the interface buf. + r.iBuf = addSample(s, r.iBuf, r) + default: + // Already have specialized samples that are not fhSamples. // Need to call the checked add method for conversion. 
r.add(s) - default: - r.fhBuf = addFH(s, r.fhBuf, r) } } @@ -701,15 +735,15 @@ func (r *sampleRing) reduceDelta(delta int64) bool { return true } - switch { - case len(r.fBuf) > 0: + switch r.bufInUse { + case fBuf: genericReduceDelta(r.fBuf, r) - case len(r.hBuf) > 0: + case hBuf: genericReduceDelta(r.hBuf, r) - case len(r.fhBuf) > 0: + case fhBuf: genericReduceDelta(r.fhBuf, r) default: - genericReduceDelta(r.buf, r) + genericReduceDelta(r.iBuf, r) } return true } @@ -733,12 +767,12 @@ func (r *sampleRing) nthLast(n int) (tsdbutil.Sample, bool) { return fSample{}, false } i := r.l - n - switch { - case len(r.fBuf) > 0: + switch r.bufInUse { + case fBuf: return r.atF(i), true - case len(r.hBuf) > 0: + case hBuf: return r.atH(i), true - case len(r.fhBuf) > 0: + case fhBuf: return r.atFH(i), true default: return r.at(i), true @@ -751,15 +785,15 @@ func (r *sampleRing) samples() []tsdbutil.Sample { k := r.f + r.l var j int - switch { - case len(r.buf) > 0: - if k > len(r.buf) { - k = len(r.buf) + switch r.bufInUse { + case iBuf: + if k > len(r.iBuf) { + k = len(r.iBuf) j = r.l - k + r.f } - n := copy(res, r.buf[r.f:k]) - copy(res[n:], r.buf[:j]) - case len(r.fBuf) > 0: + n := copy(res, r.iBuf[r.f:k]) + copy(res[n:], r.iBuf[:j]) + case fBuf: if k > len(r.fBuf) { k = len(r.fBuf) j = r.l - k + r.f @@ -770,7 +804,7 @@ func (r *sampleRing) samples() []tsdbutil.Sample { for i, s := range resF { res[i] = s } - case len(r.hBuf) > 0: + case hBuf: if k > len(r.hBuf) { k = len(r.hBuf) j = r.l - k + r.f @@ -781,7 +815,7 @@ func (r *sampleRing) samples() []tsdbutil.Sample { for i, s := range resH { res[i] = s } - case len(r.fhBuf) > 0: + case fhBuf: if k > len(r.fhBuf) { k = len(r.fhBuf) j = r.l - k + r.f From 7bbf24b707966f6577e37beb95634bc58bae4f42 Mon Sep 17 00:00:00 2001 From: Justin Lei Date: Sun, 30 Apr 2023 13:13:25 -0700 Subject: [PATCH 126/632] Make MemoizedSeriesIterator not implement chunkenc.Iterator Signed-off-by: Justin Lei --- promql/engine.go | 2 +- 
storage/memoized_iterator.go | 35 ++++++++------------- storage/memoized_iterator_test.go | 52 ++++++++++++------------------- 3 files changed, 34 insertions(+), 55 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 688048e7a..ae46f6005 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1850,7 +1850,7 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, no } case chunkenc.ValFloat: t, v = it.At() - case chunkenc.ValHistogram, chunkenc.ValFloatHistogram: + case chunkenc.ValFloatHistogram: t, h = it.AtFloatHistogram() default: panic(fmt.Errorf("unknown value type %v", valueType)) diff --git a/storage/memoized_iterator.go b/storage/memoized_iterator.go index 465afa91c..cb9fdeef4 100644 --- a/storage/memoized_iterator.go +++ b/storage/memoized_iterator.go @@ -21,6 +21,9 @@ import ( ) // MemoizedSeriesIterator wraps an iterator with a buffer to look back the previous element. +// +// This iterator regards integer histograms as float histograms; calls to Seek() will never return chunkenc.Histogram. +// This iterator deliberately does not implement chunkenc.Iterator. type MemoizedSeriesIterator struct { it chunkenc.Iterator delta int64 @@ -78,8 +81,11 @@ func (b *MemoizedSeriesIterator) Seek(t int64) chunkenc.ValueType { b.prevTime = math.MinInt64 b.valueType = b.it.Seek(t0) - if b.valueType == chunkenc.ValNone { + switch b.valueType { + case chunkenc.ValNone: return chunkenc.ValNone + case chunkenc.ValHistogram: + b.valueType = chunkenc.ValFloatHistogram } b.lastTime = b.it.AtT() } @@ -95,7 +101,8 @@ func (b *MemoizedSeriesIterator) Seek(t int64) chunkenc.ValueType { return chunkenc.ValNone } -// Next advances the iterator to the next element. +// Next advances the iterator to the next element. Note that this does not check whether the element being buffered is +// within the time range of the current element and the duration of delta before. 
func (b *MemoizedSeriesIterator) Next() chunkenc.ValueType { // Keep track of the previous element. switch b.valueType { @@ -104,12 +111,7 @@ func (b *MemoizedSeriesIterator) Next() chunkenc.ValueType { case chunkenc.ValFloat: b.prevTime, b.prevValue = b.it.At() b.prevFloatHistogram = nil - case chunkenc.ValHistogram: - b.prevValue = 0 - ts, h := b.it.AtHistogram() - b.prevTime = ts - b.prevFloatHistogram = h.ToFloat() - case chunkenc.ValFloatHistogram: + case chunkenc.ValHistogram, chunkenc.ValFloatHistogram: b.prevValue = 0 b.prevTime, b.prevFloatHistogram = b.it.AtFloatHistogram() } @@ -118,6 +120,9 @@ func (b *MemoizedSeriesIterator) Next() chunkenc.ValueType { if b.valueType != chunkenc.ValNone { b.lastTime = b.it.AtT() } + if b.valueType == chunkenc.ValHistogram { + b.valueType = chunkenc.ValFloatHistogram + } return b.valueType } @@ -126,25 +131,11 @@ func (b *MemoizedSeriesIterator) At() (int64, float64) { return b.it.At() } -// AtHistogram is not supported by this iterator. -func (b *MemoizedSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { - panic("AtHistogram is not supported by this iterator.") -} - // AtFloatHistogram returns the current float-histogram element of the iterator. func (b *MemoizedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { - if b.valueType == chunkenc.ValHistogram { - ts, h := b.it.AtHistogram() - return ts, h.ToFloat() - } return b.it.AtFloatHistogram() } -// AtT returns the current timestamp of the iterator. -func (b *MemoizedSeriesIterator) AtT() int64 { - return b.it.AtT() -} - // Err returns the last encountered error. 
func (b *MemoizedSeriesIterator) Err() error { return b.it.Err() diff --git a/storage/memoized_iterator_test.go b/storage/memoized_iterator_test.go index f922aaeec..1c8711928 100644 --- a/storage/memoized_iterator_test.go +++ b/storage/memoized_iterator_test.go @@ -16,11 +16,11 @@ package storage import ( "testing" + "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/tsdbutil" - - "github.com/stretchr/testify/require" ) func TestMemoizedSeriesIterator(t *testing.T) { @@ -64,55 +64,43 @@ func TestMemoizedSeriesIterator(t *testing.T) { hSample{t: 200, h: tsdbutil.GenerateTestHistogram(4)}, fhSample{t: 299, fh: tsdbutil.GenerateTestFloatHistogram(5)}, fSample{t: 300, f: 11}, + hSample{t: 399, h: tsdbutil.GenerateTestHistogram(6)}, + fSample{t: 400, f: 12}, }), 2) require.Equal(t, it.Seek(-123), chunkenc.ValFloat, "seek failed") sampleEq(1, 2, nil) prevSampleEq(0, 0, nil, false) - require.Equal(t, it.Next(), chunkenc.ValFloat, "next failed") - sampleEq(2, 3, nil) - prevSampleEq(1, 2, nil, true) - - require.Equal(t, it.Next(), chunkenc.ValFloat, "next failed") - require.Equal(t, it.Next(), chunkenc.ValFloat, "next failed") - require.Equal(t, it.Next(), chunkenc.ValFloat, "next failed") - sampleEq(5, 6, nil) - prevSampleEq(4, 5, nil, true) - require.Equal(t, it.Seek(5), chunkenc.ValFloat, "seek failed") sampleEq(5, 6, nil) prevSampleEq(4, 5, nil, true) - require.Equal(t, it.Seek(101), chunkenc.ValFloat, "seek failed") - sampleEq(101, 10, nil) - prevSampleEq(100, 9, nil, true) - - require.Equal(t, chunkenc.ValHistogram, it.Next(), "next failed") - sampleEq(102, 0, tsdbutil.GenerateTestFloatHistogram(0)) + // Seek to a histogram sample with a previous float sample. 
+ require.Equal(t, it.Seek(102), chunkenc.ValFloatHistogram, "seek failed") + sampleEq(102, 10, tsdbutil.GenerateTestFloatHistogram(0)) prevSampleEq(101, 10, nil, true) - require.Equal(t, chunkenc.ValHistogram, it.Next(), "next failed") - sampleEq(103, 0, tsdbutil.GenerateTestFloatHistogram(1)) - prevSampleEq(102, 0, tsdbutil.GenerateTestFloatHistogram(0), true) + // Attempt to seek backwards (no-op). + require.Equal(t, it.Seek(50), chunkenc.ValFloatHistogram, "seek failed") + sampleEq(102, 10, tsdbutil.GenerateTestFloatHistogram(0)) + prevSampleEq(101, 10, nil, true) - require.Equal(t, chunkenc.ValFloatHistogram, it.Seek(104), "seek failed") + // Seek to a float histogram sample with a previous histogram sample. + require.Equal(t, it.Seek(104), chunkenc.ValFloatHistogram, "seek failed") sampleEq(104, 0, tsdbutil.GenerateTestFloatHistogram(2)) prevSampleEq(103, 0, tsdbutil.GenerateTestFloatHistogram(1), true) - require.Equal(t, chunkenc.ValFloatHistogram, it.Next(), "next failed") - sampleEq(199, 0, tsdbutil.GenerateTestFloatHistogram(3)) - prevSampleEq(104, 0, tsdbutil.GenerateTestFloatHistogram(2), true) - - require.Equal(t, chunkenc.ValFloatHistogram, it.Seek(280), "seek failed") - sampleEq(299, 0, tsdbutil.GenerateTestFloatHistogram(5)) - prevSampleEq(0, 0, nil, false) - - require.Equal(t, chunkenc.ValFloat, it.Next(), "next failed") + // Seek to a float sample with a previous float histogram sample. + require.Equal(t, chunkenc.ValFloat, it.Seek(300), "seek failed") sampleEq(300, 11, nil) prevSampleEq(299, 0, tsdbutil.GenerateTestFloatHistogram(5), true) - require.Equal(t, it.Next(), chunkenc.ValNone, "next succeeded unexpectedly") + // Seek to a float sample with a previous histogram sample. 
+ require.Equal(t, chunkenc.ValFloat, it.Seek(400), "seek failed") + sampleEq(400, 12, nil) + prevSampleEq(399, 0, tsdbutil.GenerateTestFloatHistogram(6), true) + require.Equal(t, it.Seek(1024), chunkenc.ValNone, "seek succeeded unexpectedly") } From e9b2d874431ca464f55ea11685e3278e4c19045a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Thu, 4 May 2023 08:16:37 +0200 Subject: [PATCH 127/632] Revert change to model/textparse/protobufparse_test.go MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- model/textparse/protobufparse_test.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go index fb8669197..90c6a90f3 100644 --- a/model/textparse/protobufparse_test.go +++ b/model/textparse/protobufparse_test.go @@ -15,6 +15,7 @@ package textparse import ( "bytes" + "encoding/binary" "errors" "io" "testing" @@ -25,9 +26,8 @@ import ( "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/util/testutil" - dto "github.com/prometheus/client_model/go" + dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" ) func TestProtobufParse(t *testing.T) { @@ -449,6 +449,7 @@ metric: < `, } + varintBuf := make([]byte, binary.MaxVarintLen32) inputBuf := &bytes.Buffer{} for _, tmf := range textMetricFamilies { @@ -456,8 +457,13 @@ metric: < // From text to proto message. require.NoError(t, proto.UnmarshalText(tmf, pb)) // From proto message to binary protobuf. - err := testutil.AddMetricFamilyToProtobuf(inputBuf, pb) + protoBuf, err := proto.Marshal(pb) require.NoError(t, err) + + // Write first length, then binary protobuf. 
+ varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf))) + inputBuf.Write(varintBuf[:varintLength]) + inputBuf.Write(protoBuf) } exp := []struct { From 19a4f314f53a80e653de6fcbf6b90d70a35fc93d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Thu, 4 May 2023 08:36:44 +0200 Subject: [PATCH 128/632] Refactor testutil/protobuf.go into scrape package MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Renamed to clientprotobuf.go and added comments to indicate the intended usage. Signed-off-by: György Krajcsovits --- util/testutil/protobuf.go => scrape/clientprotobuf.go | 9 ++++++--- scrape/scrape_test.go | 4 ++-- 2 files changed, 8 insertions(+), 5 deletions(-) rename util/testutil/protobuf.go => scrape/clientprotobuf.go (78%) diff --git a/util/testutil/protobuf.go b/scrape/clientprotobuf.go similarity index 78% rename from util/testutil/protobuf.go rename to scrape/clientprotobuf.go index 8650f1373..bf165e034 100644 --- a/util/testutil/protobuf.go +++ b/scrape/clientprotobuf.go @@ -11,17 +11,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -package testutil +package scrape import ( "bytes" "encoding/binary" "github.com/gogo/protobuf/proto" + // Intentionally using client model to simulate client in tests. dto "github.com/prometheus/client_model/go" ) -// Write a MetricFamily into a protobuf +// Write a MetricFamily into a protobuf. +// This function is intended for testing scraping by providing protobuf serialized input. func MetricFamilyToProtobuf(metricFamily *dto.MetricFamily) ([]byte, error) { buffer := &bytes.Buffer{} err := AddMetricFamilyToProtobuf(buffer, metricFamily) @@ -31,7 +33,8 @@ func MetricFamilyToProtobuf(metricFamily *dto.MetricFamily) ([]byte, error) { return buffer.Bytes(), nil } -// Append a MetricFamily protobuf representation to a buffer +// Append a MetricFamily protobuf representation to a buffer. 
+// This function is intended for testing scraping by providing protobuf serialized input. func AddMetricFamilyToProtobuf(buffer *bytes.Buffer, metricFamily *dto.MetricFamily) error { protoBuf, err := proto.Marshal(metricFamily) if err != nil { diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 745765ee8..fc3ad926e 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -1766,7 +1766,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) { require.NotEmpty(t, gathered) histogramMetricFamily := gathered[0] - msg, err := testutil.MetricFamilyToProtobuf(histogramMetricFamily) + msg, err := MetricFamilyToProtobuf(histogramMetricFamily) require.NoError(t, err) now := time.Now() @@ -1789,7 +1789,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) { require.NotEmpty(t, gathered) histogramMetricFamily = gathered[0] - msg, err = testutil.MetricFamilyToProtobuf(histogramMetricFamily) + msg, err = MetricFamilyToProtobuf(histogramMetricFamily) require.NoError(t, err) now = time.Now() From 40240c9c1cb290fe95f1e61886b23fab860aeacd Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Fri, 5 May 2023 02:29:50 +0800 Subject: [PATCH 129/632] Update according to code review Signed-off-by: Jeanette Tan --- config/config.go | 2 +- docs/configuration/configuration.md | 8 ++++---- scrape/clientprotobuf.go | 1 + scrape/scrape.go | 14 +++++++------- 4 files changed, 13 insertions(+), 12 deletions(-) diff --git a/config/config.go b/config/config.go index 31ebb288e..1b6a6cf6b 100644 --- a/config/config.go +++ b/config/config.go @@ -491,7 +491,7 @@ type ScrapeConfig struct { LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"` // More than this many buckets in a native histogram will cause the scrape to // fail. 
- NativeHistogramBucketLimit uint `yaml:"bucket_limit,omitempty"` + NativeHistogramBucketLimit uint `yaml:"native_histogram_bucket_limit,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index cfb11e21e..0a8c4a5cd 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -377,10 +377,10 @@ metric_relabel_configs: # change in the future. [ target_limit: | default = 0 ] -# Limit on total number of positive and negative buckets allowed in a native -# histogram. If this is exceeded, the entire scrape will be treated as failed. -# 0 means no limit. -[ sample_limit: | default = 0 ] +# Limit on total number of positive and negative buckets allowed in a single +# native histogram. If this is exceeded, the entire scrape will be treated as +# failed. 0 means no limit. +[ native_histogram_bucket_limit: | default = 0 ] ``` Where `` must be unique across all scrape configurations. diff --git a/scrape/clientprotobuf.go b/scrape/clientprotobuf.go index bf165e034..2213268d5 100644 --- a/scrape/clientprotobuf.go +++ b/scrape/clientprotobuf.go @@ -18,6 +18,7 @@ import ( "encoding/binary" "github.com/gogo/protobuf/proto" + // Intentionally using client model to simulate client in tests. 
dto "github.com/prometheus/client_model/go" ) diff --git a/scrape/scrape.go b/scrape/scrape.go index 179adbb2a..f094ee825 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -193,8 +193,8 @@ var ( ) targetScrapeNativeHistogramBucketLimit = prometheus.NewCounter( prometheus.CounterOpts{ - Name: "prometheus_target_scrapes_histogram_exceeded_bucket_limit_total", - Help: "Total number of scrapes that hit the native histograms bucket limit and were rejected.", + Name: "prometheus_target_scrapes_exceeded_native_histogram_bucket_limit_total", + Help: "Total number of scrapes that hit the native histogram bucket limit and were rejected.", }, ) ) @@ -744,17 +744,17 @@ func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels } // appender returns an appender for ingested samples from the target. -func appender(app storage.Appender, limit, bucketLimit int) storage.Appender { +func appender(app storage.Appender, sampleLimit, bucketLimit int) storage.Appender { app = &timeLimitAppender{ Appender: app, maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), } - // The limit is applied after metrics are potentially dropped via relabeling. - if limit > 0 { + // The sampleLimit is applied after metrics are potentially dropped via relabeling. + if sampleLimit > 0 { app = &limitAppender{ Appender: app, - limit: limit, + limit: sampleLimit, } } @@ -1707,7 +1707,7 @@ loop: } if bucketLimitErr != nil { if err == nil { - err = bucketLimitErr // if sample limit is hit, that error takes precedence + err = bucketLimitErr // If sample limit is hit, that error takes precedence. } // We only want to increment this once per scrape, so this is Inc'd outside the loop. 
targetScrapeNativeHistogramBucketLimit.Inc() From e278195e3983c966c2a0f42211f62fa8f40c5561 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 4 May 2023 20:09:31 +0100 Subject: [PATCH 130/632] Cherry-pick bugfix #12322 and create v2.43.1 (#12324) * labels: respect Set after Del in Builder (#12322) The implementations are not symmetric between `Set()` and `Del()`, so we must be careful. Add tests for this, both in labels and in relabel where the issue was reported. Also make the slice implementation consistent re `slices.Contains`. * Create v2.43.1 with bugfix Signed-off-by: Bryan Boreham Co-authored-by: Julius Volz --- CHANGELOG.md | 4 +++ VERSION | 2 +- model/labels/labels.go | 9 +++---- model/labels/labels_string.go | 7 ++--- model/labels/labels_test.go | 7 +++++ model/relabel/relabel_test.go | 28 ++++++++++++++++++++ web/ui/module/codemirror-promql/package.json | 4 +-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++----- web/ui/react-app/package.json | 4 +-- 10 files changed, 60 insertions(+), 21 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6e6b30f47..af90af1e1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # Changelog +## 2.43.1 / 2023-05-03 + +* [BUGFIX] Labels: `Set()` after `Del()` would be ignored, which broke some relabeling rules. #12322 + ## 2.43.0 / 2023-03-21 We are working on some performance improvements in Prometheus, which are only diff --git a/VERSION b/VERSION index 5b9cd9afd..b1d989340 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.43.0 +2.43.1 diff --git a/model/labels/labels.go b/model/labels/labels.go index 6de001c3c..2622f7941 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -531,16 +531,15 @@ func (b *Builder) Set(n, v string) *Builder { } func (b *Builder) Get(n string) string { - for _, d := range b.del { - if d == n { - return "" - } - } + // Del() removes entries from .add but Set() does not remove from .del, so check .add first. 
for _, a := range b.add { if a.Name == n { return a.Value } } + if slices.Contains(b.del, n) { + return "" + } return b.base.Get(n) } diff --git a/model/labels/labels_string.go b/model/labels/labels_string.go index 98db29d25..db8c981e0 100644 --- a/model/labels/labels_string.go +++ b/model/labels/labels_string.go @@ -587,14 +587,15 @@ func (b *Builder) Set(n, v string) *Builder { } func (b *Builder) Get(n string) string { - if slices.Contains(b.del, n) { - return "" - } + // Del() removes entries from .add but Set() does not remove from .del, so check .add first. for _, a := range b.add { if a.Name == n { return a.Value } } + if slices.Contains(b.del, n) { + return "" + } return b.base.Get(n) } diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index 588a84b98..98cb12f36 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -607,6 +607,13 @@ func TestBuilder(t *testing.T) { require.Equal(t, tcase.want.BytesWithoutLabels(nil, "aaa", "bbb"), b.Labels(tcase.base).Bytes(nil)) }) } + t.Run("set_after_del", func(t *testing.T) { + b := NewBuilder(FromStrings("aaa", "111")) + b.Del("bbb") + b.Set("bbb", "222") + require.Equal(t, FromStrings("aaa", "111", "bbb", "222"), b.Labels(EmptyLabels())) + require.Equal(t, "222", b.Get("bbb")) + }) } func TestScratchBuilder(t *testing.T) { diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index d277d778d..b50ff4010 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -397,6 +397,34 @@ func TestRelabel(t *testing.T) { "foo": "bar", }), }, + { // From https://github.com/prometheus/prometheus/issues/12283 + input: labels.FromMap(map[string]string{ + "__meta_kubernetes_pod_container_port_name": "foo", + "__meta_kubernetes_pod_annotation_XXX_metrics_port": "9091", + }), + relabel: []*Config{ + { + Regex: MustNewRegexp("^__meta_kubernetes_pod_container_port_name$"), + Action: LabelDrop, + }, + { + SourceLabels: 
model.LabelNames{"__meta_kubernetes_pod_annotation_XXX_metrics_port"}, + Regex: MustNewRegexp("(.+)"), + Action: Replace, + Replacement: "metrics", + TargetLabel: "__meta_kubernetes_pod_container_port_name", + }, + { + SourceLabels: model.LabelNames{"__meta_kubernetes_pod_container_port_name"}, + Regex: MustNewRegexp("^metrics$"), + Action: Keep, + }, + }, + output: labels.FromMap(map[string]string{ + "__meta_kubernetes_pod_annotation_XXX_metrics_port": "9091", + "__meta_kubernetes_pod_container_port_name": "metrics", + }), + }, { input: labels.FromMap(map[string]string{ "a": "foo", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index f20a63468..1ea56ae45 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.43.0", + "version": "0.43.1", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.43.0", + "@prometheus-io/lezer-promql": "0.43.1", "lru-cache": "^6.0.0" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 411251e56..99b98bdea 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.43.0", + "version": "0.43.1", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 2e334cf9e..797a177c1 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -28,10 +28,10 @@ }, "module/codemirror-promql": { "name": 
"@prometheus-io/codemirror-promql", - "version": "0.43.0", + "version": "0.43.1", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.43.0", + "@prometheus-io/lezer-promql": "0.43.1", "lru-cache": "^6.0.0" }, "devDependencies": { @@ -61,7 +61,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.43.0", + "version": "0.43.1", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.2.2", @@ -20763,7 +20763,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.43.0", + "version": "0.43.1", "dependencies": { "@codemirror/autocomplete": "^6.4.0", "@codemirror/commands": "^6.2.0", @@ -20781,7 +20781,7 @@ "@lezer/lr": "^1.3.1", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.43.0", + "@prometheus-io/codemirror-promql": "0.43.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.2.0", @@ -23417,7 +23417,7 @@ "@lezer/lr": "^1.3.1", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.43.0", + "@prometheus-io/codemirror-promql": "0.43.1", "@testing-library/react-hooks": "^7.0.2", "@types/enzyme": "^3.10.12", "@types/flot": "0.0.32", @@ -23468,7 +23468,7 @@ "@lezer/common": "^1.0.2", "@lezer/highlight": "^1.1.3", "@lezer/lr": "^1.3.1", - "@prometheus-io/lezer-promql": "0.43.0", + "@prometheus-io/lezer-promql": "0.43.1", "@types/lru-cache": "^5.1.1", "isomorphic-fetch": "^3.0.0", "lru-cache": "^6.0.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index c8b115582..37ad5e195 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.43.0", + "version": "0.43.1", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.4.0", @@ -19,7 +19,7 @@ "@lezer/common": "^1.0.2", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - 
"@prometheus-io/codemirror-promql": "0.43.0", + "@prometheus-io/codemirror-promql": "0.43.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.2.0", From f5fcaa3872ce03808567fabc56afc9cf61c732cb Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Fri, 5 May 2023 14:34:30 +0200 Subject: [PATCH 131/632] Fix setting reset header to gauge histogram in seriesToChunkEncoder (#12329) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- storage/series.go | 10 +++ storage/series_test.go | 177 +++++++++++++++++++++++++++++++---------- 2 files changed, 147 insertions(+), 40 deletions(-) diff --git a/storage/series.go b/storage/series.go index 5daa6255d..b73f1e35c 100644 --- a/storage/series.go +++ b/storage/series.go @@ -297,9 +297,11 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator { seriesIter := s.Series.Iterator(nil) lastType := chunkenc.ValNone for typ := seriesIter.Next(); typ != chunkenc.ValNone; typ = seriesIter.Next() { + chunkCreated := false if typ != lastType || i >= seriesToChunkEncoderSplit { // Create a new chunk if the sample type changed or too many samples in the current one. 
chks = appendChunk(chks, mint, maxt, chk) + chunkCreated = true chk, err = chunkenc.NewEmptyChunk(typ.ChunkEncoding()) if err != nil { return errChunksIterator{err: err} @@ -330,6 +332,7 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator { if ok, counterReset := app.AppendHistogram(t, h); !ok { chks = appendChunk(chks, mint, maxt, chk) histChunk := chunkenc.NewHistogramChunk() + chunkCreated = true if counterReset { histChunk.SetCounterResetHeader(chunkenc.CounterReset) } @@ -346,11 +349,15 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator { panic("unexpected error while appending histogram") } } + if chunkCreated && h.CounterResetHint == histogram.GaugeType { + chk.(*chunkenc.HistogramChunk).SetCounterResetHeader(chunkenc.GaugeType) + } case chunkenc.ValFloatHistogram: t, fh = seriesIter.AtFloatHistogram() if ok, counterReset := app.AppendFloatHistogram(t, fh); !ok { chks = appendChunk(chks, mint, maxt, chk) floatHistChunk := chunkenc.NewFloatHistogramChunk() + chunkCreated = true if counterReset { floatHistChunk.SetCounterResetHeader(chunkenc.CounterReset) } @@ -366,6 +373,9 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator { panic("unexpected error while float appending histogram") } } + if chunkCreated && fh.CounterResetHint == histogram.GaugeType { + chk.(*chunkenc.FloatHistogramChunk).SetCounterResetHeader(chunkenc.GaugeType) + } default: return errChunksIterator{err: fmt.Errorf("unknown sample type %s", typ.String())} } diff --git a/storage/series_test.go b/storage/series_test.go index 4c318f1a0..5c74fae09 100644 --- a/storage/series_test.go +++ b/storage/series_test.go @@ -126,14 +126,13 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) { } type histogramTest struct { - samples []tsdbutil.Sample - expectedChunks int - expectedCounterReset bool + samples []tsdbutil.Sample + expectedCounterResetHeaders []chunkenc.CounterResetHeader } func TestHistogramSeriesToChunks(t 
*testing.T) { h1 := &histogram.Histogram{ - Count: 3, + Count: 7, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 100, @@ -158,7 +157,7 @@ func TestHistogramSeriesToChunks(t *testing.T) { } // Implicit counter reset by reduction in buckets, not appendable. h2down := &histogram.Histogram{ - Count: 8, + Count: 10, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 100, @@ -171,7 +170,7 @@ func TestHistogramSeriesToChunks(t *testing.T) { } fh1 := &histogram.FloatHistogram{ - Count: 4, + Count: 6, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 100, @@ -183,7 +182,7 @@ func TestHistogramSeriesToChunks(t *testing.T) { } // Appendable to fh1. fh2 := &histogram.FloatHistogram{ - Count: 15, + Count: 17, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 100, @@ -196,7 +195,7 @@ func TestHistogramSeriesToChunks(t *testing.T) { } // Implicit counter reset by reduction in buckets, not appendable. fh2down := &histogram.FloatHistogram{ - Count: 13, + Count: 15, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 100, @@ -208,6 +207,60 @@ func TestHistogramSeriesToChunks(t *testing.T) { PositiveBuckets: []float64{2, 2, 7, 2}, } + // Gauge histogram. + gh1 := &histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + Count: 7, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + }, + PositiveBuckets: []int64{2, 1}, // Abs: 2, 3 + } + gh2 := &histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, 1, -2, 3}, // Abs: 2, 3, 1, 4 + } + + // Float gauge histogram. 
+ gfh1 := &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Count: 6, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + }, + PositiveBuckets: []float64{3, 1}, + } + gfh2 := &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Count: 17, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []float64{4, 2, 7, 2}, + } + staleHistogram := &histogram.Histogram{ Sum: math.Float64frombits(value.StaleNaN), } @@ -220,74 +273,70 @@ func TestHistogramSeriesToChunks(t *testing.T) { samples: []tsdbutil.Sample{ hSample{t: 1, h: h1}, }, - expectedChunks: 1, + expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset}, }, "two histograms encoded to a single chunk": { samples: []tsdbutil.Sample{ hSample{t: 1, h: h1}, hSample{t: 2, h: h2}, }, - expectedChunks: 1, + expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset}, }, "two histograms encoded to two chunks": { samples: []tsdbutil.Sample{ hSample{t: 1, h: h2}, hSample{t: 2, h: h1}, }, - expectedChunks: 2, - expectedCounterReset: true, + expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset}, }, "histogram and stale sample encoded to two chunks": { samples: []tsdbutil.Sample{ hSample{t: 1, h: staleHistogram}, hSample{t: 2, h: h1}, }, - expectedChunks: 2, + expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, }, "histogram and reduction in bucket encoded to two chunks": { samples: []tsdbutil.Sample{ hSample{t: 1, h: h1}, hSample{t: 2, h: h2down}, }, - expectedChunks: 2, - expectedCounterReset: true, + expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset}, 
}, // Float histograms. "single float histogram to single chunk": { samples: []tsdbutil.Sample{ fhSample{t: 1, fh: fh1}, }, - expectedChunks: 1, + expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset}, }, "two float histograms encoded to a single chunk": { samples: []tsdbutil.Sample{ fhSample{t: 1, fh: fh1}, fhSample{t: 2, fh: fh2}, }, - expectedChunks: 1, + expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset}, }, "two float histograms encoded to two chunks": { samples: []tsdbutil.Sample{ fhSample{t: 1, fh: fh2}, fhSample{t: 2, fh: fh1}, }, - expectedChunks: 2, - expectedCounterReset: true, + expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset}, }, "float histogram and stale sample encoded to two chunks": { samples: []tsdbutil.Sample{ fhSample{t: 1, fh: staleFloatHistogram}, fhSample{t: 2, fh: fh1}, }, - expectedChunks: 2, + expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, }, "float histogram and reduction in bucket encoded to two chunks": { samples: []tsdbutil.Sample{ fhSample{t: 1, fh: fh1}, fhSample{t: 2, fh: fh2down}, }, - expectedChunks: 2, - expectedCounterReset: true, + expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset}, }, // Mixed. 
"histogram and float histogram encoded to two chunks": { @@ -295,21 +344,61 @@ func TestHistogramSeriesToChunks(t *testing.T) { hSample{t: 1, h: h1}, fhSample{t: 2, fh: fh2}, }, - expectedChunks: 2, + expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, }, "float histogram and histogram encoded to two chunks": { samples: []tsdbutil.Sample{ fhSample{t: 1, fh: fh1}, hSample{t: 2, h: h2}, }, - expectedChunks: 2, + expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, }, "histogram and stale float histogram encoded to two chunks": { samples: []tsdbutil.Sample{ hSample{t: 1, h: h1}, fhSample{t: 2, fh: staleFloatHistogram}, }, - expectedChunks: 2, + expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, + }, + "single gauge histogram encoded to one chunk": { + samples: []tsdbutil.Sample{ + hSample{t: 1, h: gh1}, + }, + expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, + }, + "two gauge histograms encoded to one chunk when counter increases": { + samples: []tsdbutil.Sample{ + hSample{t: 1, h: gh1}, + hSample{t: 2, h: gh2}, + }, + expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, + }, + "two gauge histograms encoded to one chunk when counter decreases": { + samples: []tsdbutil.Sample{ + hSample{t: 1, h: gh2}, + hSample{t: 2, h: gh1}, + }, + expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, + }, + "single gauge float histogram encoded to one chunk": { + samples: []tsdbutil.Sample{ + fhSample{t: 1, fh: gfh1}, + }, + expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, + }, + "two float gauge histograms encoded to one chunk when counter increases": { + samples: []tsdbutil.Sample{ + fhSample{t: 1, fh: gfh1}, + fhSample{t: 2, fh: gfh2}, + }, + 
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, + }, + "two float gauge histograms encoded to one chunk when counter decreases": { + samples: []tsdbutil.Sample{ + fhSample{t: 1, fh: gfh2}, + fhSample{t: 2, fh: gfh1}, + }, + expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, }, } @@ -322,13 +411,24 @@ func TestHistogramSeriesToChunks(t *testing.T) { func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) { lbs := labels.FromStrings("__name__", "up", "instance", "localhost:8080") - series := NewListSeries(lbs, test.samples) + copiedSamples := []tsdbutil.Sample{} + for _, s := range test.samples { + switch cs := s.(type) { + case hSample: + copiedSamples = append(copiedSamples, hSample{t: cs.t, h: cs.h.Copy()}) + case fhSample: + copiedSamples = append(copiedSamples, fhSample{t: cs.t, fh: cs.fh.Copy()}) + default: + t.Error("internal error, unexpected type") + } + } + series := NewListSeries(lbs, copiedSamples) encoder := NewSeriesToChunkEncoder(series) require.EqualValues(t, lbs, encoder.Labels()) chks, err := ExpandChunks(encoder.Iterator(nil)) require.NoError(t, err) - require.Equal(t, test.expectedChunks, len(chks)) + require.Equal(t, len(test.expectedCounterResetHeaders), len(chks)) // Decode all encoded samples and assert they are equal to the original ones. encodedSamples := expandHistogramSamples(chks) @@ -339,8 +439,10 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) { case hSample: encodedSample, ok := encodedSamples[i].(hSample) require.True(t, ok, "expect histogram", fmt.Sprintf("at idx %d", i)) - // Ignore counter reset here, will check on chunk level. - encodedSample.h.CounterResetHint = histogram.UnknownCounterReset + // Ignore counter reset if not gauge here, will check on chunk level. 
+ if expectedSample.h.CounterResetHint != histogram.GaugeType { + encodedSample.h.CounterResetHint = histogram.UnknownCounterReset + } if value.IsStaleNaN(expectedSample.h.Sum) { require.True(t, value.IsStaleNaN(encodedSample.h.Sum), fmt.Sprintf("at idx %d", i)) continue @@ -349,8 +451,10 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) { case fhSample: encodedSample, ok := encodedSamples[i].(fhSample) require.True(t, ok, "expect float histogram", fmt.Sprintf("at idx %d", i)) - // Ignore counter reset here, will check on chunk level. - encodedSample.fh.CounterResetHint = histogram.UnknownCounterReset + // Ignore counter reset if not gauge here, will check on chunk level. + if expectedSample.fh.CounterResetHint != histogram.GaugeType { + encodedSample.fh.CounterResetHint = histogram.UnknownCounterReset + } if value.IsStaleNaN(expectedSample.fh.Sum) { require.True(t, value.IsStaleNaN(encodedSample.fh.Sum), fmt.Sprintf("at idx %d", i)) continue @@ -361,15 +465,8 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) { } } - // If a counter reset hint is expected, it can only be found in the second chunk. - // Otherwise, we assert an unknown counter reset hint in all chunks. 
- if test.expectedCounterReset { - require.Equal(t, chunkenc.UnknownCounterReset, getCounterResetHint(chks[0])) - require.Equal(t, chunkenc.CounterReset, getCounterResetHint(chks[1])) - } else { - for _, chk := range chks { - require.Equal(t, chunkenc.UnknownCounterReset, getCounterResetHint(chk)) - } + for i, expectedCounterResetHint := range test.expectedCounterResetHeaders { + require.Equal(t, expectedCounterResetHint, getCounterResetHint(chks[i]), fmt.Sprintf("chunk at index %d", i)) } } From 94d9367bbf13a5325c0d55b22babf56d164f91e1 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 8 May 2023 11:30:29 +0100 Subject: [PATCH 132/632] Create 2.44.0-rc.2 (#12341) Signed-off-by: Bryan Boreham --- CHANGELOG.md | 4 ++++ VERSION | 2 +- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/react-app/package.json | 4 ++-- 6 files changed, 17 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 06fb01ad0..ebc602800 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # Changelog +## 2.44.0-rc.2 / 2023-05-07 + +* [ENHANCEMENT] Storage: Optimise buffer used to iterate through samples. #12326 + ## 2.44.0-rc.1 / 2023-05-03 * [BUGFIX] Labels: Set after Del would be ignored, which broke some relabeling rules. 
#12322 diff --git a/VERSION b/VERSION index 662635da4..3f6719d55 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.44.0-rc.1 +2.44.0-rc.2 diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 11fdeee45..048b1aab4 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.44.0-rc.1", + "version": "0.44.0-rc.2", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.44.0-rc.1", + "@prometheus-io/lezer-promql": "0.44.0-rc.2", "lru-cache": "^6.0.0" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 4b3f29b52..622e9bea4 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.44.0-rc.1", + "version": "0.44.0-rc.2", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 37804eebf..0cd9eb729 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -28,10 +28,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.44.0-rc.1", + "version": "0.44.0-rc.2", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.44.0-rc.1", + "@prometheus-io/lezer-promql": "0.44.0-rc.2", "lru-cache": "^6.0.0" }, "devDependencies": { @@ -61,7 +61,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.44.0-rc.1", + "version": "0.44.0-rc.2", "license": "Apache-2.0", 
"devDependencies": { "@lezer/generator": "^1.2.2", @@ -20763,7 +20763,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.44.0-rc.1", + "version": "0.44.0-rc.2", "dependencies": { "@codemirror/autocomplete": "^6.4.0", "@codemirror/commands": "^6.2.0", @@ -20781,7 +20781,7 @@ "@lezer/lr": "^1.3.1", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.44.0-rc.1", + "@prometheus-io/codemirror-promql": "0.44.0-rc.2", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.2.0", @@ -23417,7 +23417,7 @@ "@lezer/lr": "^1.3.1", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.44.0-rc.1", + "@prometheus-io/codemirror-promql": "0.44.0-rc.2", "@testing-library/react-hooks": "^7.0.2", "@types/enzyme": "^3.10.12", "@types/flot": "0.0.32", @@ -23468,7 +23468,7 @@ "@lezer/common": "^1.0.2", "@lezer/highlight": "^1.1.3", "@lezer/lr": "^1.3.1", - "@prometheus-io/lezer-promql": "0.44.0-rc.1", + "@prometheus-io/lezer-promql": "0.44.0-rc.2", "@types/lru-cache": "^5.1.1", "isomorphic-fetch": "^3.0.0", "lru-cache": "^6.0.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 57e4d8120..1e0c4ed15 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.44.0-rc.1", + "version": "0.44.0-rc.2", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.4.0", @@ -19,7 +19,7 @@ "@lezer/common": "^1.0.2", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.44.0-rc.1", + "@prometheus-io/codemirror-promql": "0.44.0-rc.2", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.2.0", From b1b8fd77c419795d23b97a91cfe4e1f4c8d4d30c Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 9 May 2023 10:24:55 +0000 Subject: [PATCH 133/632] docs: state that remote write is stable Since 
https://github.com/prometheus/docs/pull/2313 has been merged declaring remote write to be stable at version 1.0. Signed-off-by: Bryan Boreham --- docs/stability.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/stability.md b/docs/stability.md index e4cc3203b..1fd2e51e0 100644 --- a/docs/stability.md +++ b/docs/stability.md @@ -18,12 +18,13 @@ Things considered stable for 2.x: * Configuration file format (minus the service discovery remote read/write, see below) * Rule/alert file format * Console template syntax and semantics +* Remote write sending, per the [1.0 specification](https://prometheus.io/docs/concepts/remote_write_spec/). Things considered unstable for 2.x: * Any feature listed as experimental or subject to change, including: * The [`holt_winters` PromQL function](https://github.com/prometheus/prometheus/issues/2458) - * Remote read, remote write and the remote read endpoint + * Remote write receiving, remote read and the remote read endpoint * Server-side HTTPS and basic authentication * Service discovery integrations, with the exception of `static_configs` and `file_sd_configs` * Go APIs of packages that are part of the server From a96350f15f02607d59a7e54373963a11926815b3 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 9 May 2023 19:30:18 +0530 Subject: [PATCH 134/632] Remove codesome and add jesusvazquez in CODEOWNERS for tsdb Signed-off-by: Ganesh Vernekar --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 75d24cdef..b75a7896e 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -2,6 +2,6 @@ /web/ui/module @juliusv @nexucis /storage/remote @csmarchbanks @cstyan @bwplotka @tomwilkie /discovery/kubernetes @brancz -/tsdb @codesome +/tsdb @jesusvazquez /promql @roidelapluie /cmd/promtool @dgl From 9e4e2a4a51d69fe621b1628d0618b25e3a6baa1c Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Wed, 10 May 2023 12:38:02 -0400 Subject: 
[PATCH 135/632] wlog: use filepath for getting checkpoint number This changes usage of path to be replaced with path/filepath, allowing for filepath.Base to properly return the base directory on systems where `/` is not the standard path separator. This resolves an issue on Windows where intermediate folders containing a `.` were incorrectly considered to be a part of the checkpoint name. Related to grafana/agent#3826. Signed-off-by: Robert Fratto --- tsdb/wlog/watcher.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go index b0c17dcba..31f60feab 100644 --- a/tsdb/wlog/watcher.go +++ b/tsdb/wlog/watcher.go @@ -18,7 +18,7 @@ import ( "io" "math" "os" - "path" + "path/filepath" "strconv" "strings" "time" @@ -156,7 +156,7 @@ func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logge writer: writer, metrics: metrics, readerMetrics: readerMetrics, - walDir: path.Join(dir, "wal"), + walDir: filepath.Join(dir, "wal"), name: name, sendExemplars: sendExemplars, sendHistograms: sendHistograms, @@ -691,7 +691,7 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err func checkpointNum(dir string) (int, error) { // Checkpoint dir names are in the format checkpoint.000001 // dir may contain a hidden directory, so only check the base directory - chunks := strings.Split(path.Base(dir), ".") + chunks := strings.Split(filepath.Base(dir), ".") if len(chunks) != 2 { return 0, errors.Errorf("invalid checkpoint dir string: %s", dir) } From c0f1abb574ae928662146907e0c0fbe6eca979e0 Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Wed, 10 May 2023 15:46:14 -0700 Subject: [PATCH 136/632] MatchNotRegexp optimization Signed-off-by: Alan Protasio --- tsdb/querier.go | 8 +++++ tsdb/querier_bench_test.go | 3 ++ tsdb/querier_test.go | 60 ++++++++++++++++++++++++++------------ 3 files changed, 52 insertions(+), 19 deletions(-) diff --git a/tsdb/querier.go b/tsdb/querier.go index 
70f8f6de8..8b17bb745 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -371,6 +371,14 @@ func inversePostingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Posting if m.Type == labels.MatchEqual && m.Value == "" { res = vals } else { + // Inverse of a MatchNotRegexp is MatchRegexp (double negation). + // Fast-path for set matching. + if m.Type == labels.MatchNotRegexp { + setMatches := findSetMatches(m.GetRegexString()) + if len(setMatches) > 0 { + return ix.Postings(m.Name, setMatches...) + } + } for _, val := range vals { if !m.Matches(val) { res = append(res, val) diff --git a/tsdb/querier_bench_test.go b/tsdb/querier_bench_test.go index 89758a1d3..6e5243ef1 100644 --- a/tsdb/querier_bench_test.go +++ b/tsdb/querier_bench_test.go @@ -114,6 +114,7 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) { iCharSet := labels.MustNewMatcher(labels.MatchRegexp, "i", "1[0-9]") iAlternate := labels.MustNewMatcher(labels.MatchRegexp, "i", "(1|2|3|4|5|6|20|55)") iXYZ := labels.MustNewMatcher(labels.MatchRegexp, "i", "X|Y|Z") + iNotXYZ := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "X|Y|Z") cases := []struct { name string matchers []*labels.Matcher @@ -131,6 +132,7 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) { {`j=~"X.+"`, []*labels.Matcher{jXplus}}, {`i=~"(1|2|3|4|5|6|20|55)"`, []*labels.Matcher{iAlternate}}, {`i=~"X|Y|Z"`, []*labels.Matcher{iXYZ}}, + {`i!~"X|Y|Z"`, []*labels.Matcher{iNotXYZ}}, {`i=~".*"`, []*labels.Matcher{iStar}}, {`i=~"1.*"`, []*labels.Matcher{i1Star}}, {`i=~".*1"`, []*labels.Matcher{iStar1}}, @@ -146,6 +148,7 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) { {`n="1",i!="",j=~"X.+"`, []*labels.Matcher{n1, iNotEmpty, jXplus}}, {`n="1",i!="",j=~"XXX|YYY"`, []*labels.Matcher{n1, iNotEmpty, jXXXYYY}}, {`n="1",i=~"X|Y|Z",j="foo"`, []*labels.Matcher{n1, iXYZ, jFoo}}, + {`n="1",i!~"X|Y|Z",j="foo"`, []*labels.Matcher{n1, iNotXYZ, jFoo}}, {`n="1",i=~".+",j="foo"`, []*labels.Matcher{n1, iPlus, 
jFoo}}, {`n="1",i=~"1.+",j="foo"`, []*labels.Matcher{n1, i1Plus, jFoo}}, {`n="1",i=~".*1.*",j="foo"`, []*labels.Matcher{n1, iStar1Star, jFoo}}, diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 5bb99a453..9e17f5b8d 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -1801,6 +1801,19 @@ func TestPostingsForMatchers(t *testing.T) { labels.FromStrings("n", "2.5"), }, }, + { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "n", "1")}, + exp: []labels.Labels{ + labels.FromStrings("n", "2"), + labels.FromStrings("n", "2.5"), + }, + }, + { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "n", "1|2.5")}, + exp: []labels.Labels{ + labels.FromStrings("n", "2"), + }, + }, { matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^a$")}, exp: []labels.Labels{ @@ -1890,27 +1903,36 @@ func TestPostingsForMatchers(t *testing.T) { require.NoError(t, err) for _, c := range cases { - exp := map[string]struct{}{} - for _, l := range c.exp { - exp[l.String()] = struct{}{} - } - p, err := PostingsForMatchers(ir, c.matchers...) - require.NoError(t, err) - - var builder labels.ScratchBuilder - for p.Next() { - require.NoError(t, ir.Series(p.At(), &builder, &[]chunks.Meta{})) - lbls := builder.Labels() - if _, ok := exp[lbls.String()]; !ok { - t.Errorf("Evaluating %v, unexpected result %s", c.matchers, lbls.String()) - } else { - delete(exp, lbls.String()) + name := "" + for i, matcher := range c.matchers { + if i > 0 { + name += "," } + name += matcher.String() } - require.NoError(t, p.Err()) - if len(exp) != 0 { - t.Errorf("Evaluating %v, missing results %+v", c.matchers, exp) - } + t.Run(name, func(t *testing.T) { + exp := map[string]struct{}{} + for _, l := range c.exp { + exp[l.String()] = struct{}{} + } + p, err := PostingsForMatchers(ir, c.matchers...) 
+ require.NoError(t, err) + + var builder labels.ScratchBuilder + for p.Next() { + require.NoError(t, ir.Series(p.At(), &builder, &[]chunks.Meta{})) + lbls := builder.Labels() + if _, ok := exp[lbls.String()]; !ok { + t.Errorf("Evaluating %v, unexpected result %s", c.matchers, lbls.String()) + } else { + delete(exp, lbls.String()) + } + } + require.NoError(t, p.Err()) + if len(exp) != 0 { + t.Errorf("Evaluating %v, missing results %+v", c.matchers, exp) + } + }) } } From 2f3561971078259fedb2425f1cc218b6cecd84ce Mon Sep 17 00:00:00 2001 From: Mickael Carl Date: Wed, 3 May 2023 21:46:13 +0100 Subject: [PATCH 137/632] discovery/kubernetes: attach node labels when the endpoints TargetRef's kind are Node Signed-off-by: Mickael Carl --- discovery/kubernetes/endpoints.go | 7 +- discovery/kubernetes/endpoints_test.go | 102 ++++++++++-- discovery/kubernetes/endpointslice.go | 6 +- discovery/kubernetes/endpointslice_test.go | 178 ++++++++++++++++++++- discovery/kubernetes/kubernetes.go | 38 +++-- 5 files changed, 302 insertions(+), 29 deletions(-) diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index 27742ab46..2413dab45 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -305,7 +305,11 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { } if e.withNodeMetadata { - target = addNodeLabels(target, e.nodeInf, e.logger, addr.NodeName) + if addr.NodeName != nil { + target = addNodeLabels(target, e.nodeInf, e.logger, addr.NodeName) + } else if addr.TargetRef != nil && addr.TargetRef.Kind == "Node" { + target = addNodeLabels(target, e.nodeInf, e.logger, &addr.TargetRef.Name) + } } pod := e.resolvePodRef(addr.TargetRef) @@ -466,5 +470,6 @@ func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger log.L nodeLabelset[model.LabelName(nodeLabelPrefix+ln)] = lv(v) nodeLabelset[model.LabelName(nodeLabelPresentPrefix+ln)] = presentValue } + return tg.Merge(nodeLabelset) } 
diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go index 91b1b0c67..5aa58bdc4 100644 --- a/discovery/kubernetes/endpoints_test.go +++ b/discovery/kubernetes/endpoints_test.go @@ -69,6 +69,24 @@ func makeEndpoints() *v1.Endpoints { }, }, }, + { + Addresses: []v1.EndpointAddress{ + { + IP: "6.7.8.9", + TargetRef: &v1.ObjectReference{ + Kind: "Node", + Name: "barbaz", + }, + }, + }, + Ports: []v1.EndpointPort{ + { + Name: "testport", + Port: 9002, + Protocol: v1.ProtocolTCP, + }, + }, + }, }, } } @@ -106,6 +124,14 @@ func TestEndpointsDiscoveryBeforeRun(t *testing.T) { "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "false", }, + { + "__address__": "6.7.8.9:9002", + "__meta_kubernetes_endpoint_address_target_kind": "Node", + "__meta_kubernetes_endpoint_address_target_name": "barbaz", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "default", @@ -398,6 +424,14 @@ func TestEndpointsDiscoveryWithService(t *testing.T) { "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "false", }, + { + "__address__": "6.7.8.9:9002", + "__meta_kubernetes_endpoint_address_target_kind": "Node", + "__meta_kubernetes_endpoint_address_target_name": "barbaz", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "default", @@ -466,6 +500,14 @@ func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) { "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "false", }, + { + "__address__": "6.7.8.9:9002", + "__meta_kubernetes_endpoint_address_target_kind": "Node", + "__meta_kubernetes_endpoint_address_target_name": 
"barbaz", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "default", @@ -484,8 +526,10 @@ func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) { func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) { metadataConfig := AttachMetadataConfig{Node: true} - nodeLabels := map[string]string{"az": "us-east1"} - node := makeNode("foobar", "", "", nodeLabels, nil) + nodeLabels1 := map[string]string{"az": "us-east1"} + nodeLabels2 := map[string]string{"az": "us-west2"} + node1 := makeNode("foobar", "", "", nodeLabels1, nil) + node2 := makeNode("barbaz", "", "", nodeLabels2, nil) svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", @@ -495,7 +539,7 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) { }, }, } - n, _ := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), svc, node) + n, _ := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), svc, node1, node2) k8sDiscoveryTest{ discovery: n, @@ -526,6 +570,17 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) { "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "false", }, + { + "__address__": "6.7.8.9:9002", + "__meta_kubernetes_endpoint_address_target_kind": "Node", + "__meta_kubernetes_endpoint_address_target_name": "barbaz", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + "__meta_kubernetes_node_label_az": "us-west2", + "__meta_kubernetes_node_labelpresent_az": "true", + "__meta_kubernetes_node_name": "barbaz", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "default", @@ -541,8 +596,10 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) { } func 
TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) { - nodeLabels := map[string]string{"az": "us-east1"} - nodes := makeNode("foobar", "", "", nodeLabels, nil) + nodeLabels1 := map[string]string{"az": "us-east1"} + nodeLabels2 := map[string]string{"az": "us-west2"} + node1 := makeNode("foobar", "", "", nodeLabels1, nil) + node2 := makeNode("barbaz", "", "", nodeLabels2, nil) metadataConfig := AttachMetadataConfig{Node: true} svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -553,13 +610,13 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) { }, }, } - n, c := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), nodes, svc) + n, c := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), node1, node2, svc) k8sDiscoveryTest{ discovery: n, afterStart: func() { - nodes.Labels["az"] = "eu-central1" - c.CoreV1().Nodes().Update(context.Background(), nodes, metav1.UpdateOptions{}) + node1.Labels["az"] = "eu-central1" + c.CoreV1().Nodes().Update(context.Background(), node1, metav1.UpdateOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ @@ -572,7 +629,7 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) { "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", - "__meta_kubernetes_node_label_az": "eu-central1", + "__meta_kubernetes_node_label_az": "us-east1", "__meta_kubernetes_node_labelpresent_az": "true", "__meta_kubernetes_node_name": "foobar", }, @@ -588,6 +645,17 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) { "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "false", }, + { + "__address__": "6.7.8.9:9002", + "__meta_kubernetes_endpoint_address_target_kind": "Node", + "__meta_kubernetes_endpoint_address_target_name": "barbaz", + 
"__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + "__meta_kubernetes_node_label_az": "us-west2", + "__meta_kubernetes_node_labelpresent_az": "true", + "__meta_kubernetes_node_name": "barbaz", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "default", @@ -699,6 +767,14 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) { "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "false", }, + { + "__address__": "6.7.8.9:9002", + "__meta_kubernetes_endpoint_address_target_kind": "Node", + "__meta_kubernetes_endpoint_address_target_name": "barbaz", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "ns1", @@ -815,6 +891,14 @@ func TestEndpointsDiscoveryOwnNamespace(t *testing.T) { "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "false", }, + { + "__address__": "6.7.8.9:9002", + "__meta_kubernetes_endpoint_address_target_kind": "Node", + "__meta_kubernetes_endpoint_address_target_name": "barbaz", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "own-ns", diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index 841b7d4f6..c7df64252 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -339,7 +339,11 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou } if e.withNodeMetadata { - target = addNodeLabels(target, e.nodeInf, e.logger, ep.nodename()) + if ep.targetRef() != nil && ep.targetRef().Kind == "Node" { + target = addNodeLabels(target, 
e.nodeInf, e.logger, &ep.targetRef().Name) + } else { + target = addNodeLabels(target, e.nodeInf, e.logger, ep.nodename()) + } } pod := e.resolvePodRef(ep.targetRef()) diff --git a/discovery/kubernetes/endpointslice_test.go b/discovery/kubernetes/endpointslice_test.go index f4076b943..8104e3db3 100644 --- a/discovery/kubernetes/endpointslice_test.go +++ b/discovery/kubernetes/endpointslice_test.go @@ -90,6 +90,17 @@ func makeEndpointSliceV1() *v1.EndpointSlice { Serving: boolptr(true), Terminating: boolptr(true), }, + }, { + Addresses: []string{"4.5.6.7"}, + Conditions: v1.EndpointConditions{ + Ready: boolptr(true), + Serving: boolptr(true), + Terminating: boolptr(false), + }, + TargetRef: &corev1.ObjectReference{ + Kind: "Node", + Name: "barbaz", + }, }, }, } @@ -130,6 +141,17 @@ func makeEndpointSliceV1beta1() *v1beta1.EndpointSlice { Serving: boolptr(true), Terminating: boolptr(true), }, + }, { + Addresses: []string{"4.5.6.7"}, + Conditions: v1beta1.EndpointConditions{ + Ready: boolptr(true), + Serving: boolptr(true), + Terminating: boolptr(false), + }, + TargetRef: &corev1.ObjectReference{ + Kind: "Node", + Name: "barbaz", + }, }, }, } @@ -183,6 +205,18 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + "__meta_kubernetes_endpointslice_address_target_name": "barbaz", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + }, }, Labels: 
model.LabelSet{ "__meta_kubernetes_endpointslice_address_type": "IPv4", @@ -233,6 +267,17 @@ func TestEndpointSliceDiscoveryBeforeRunV1beta1(t *testing.T) { "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + "__meta_kubernetes_endpointslice_address_target_name": "barbaz", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_endpointslice_address_type": "IPv4", @@ -419,6 +464,18 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) { "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + "__meta_kubernetes_endpointslice_address_target_name": "barbaz", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + }, }, Labels: map[model.LabelName]model.LabelValue{ "__meta_kubernetes_endpointslice_address_type": "IPv4", @@ -503,6 +560,18 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) { "__meta_kubernetes_endpointslice_port_name": "testport", 
"__meta_kubernetes_endpointslice_port_protocol": "TCP", }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + "__meta_kubernetes_endpointslice_address_target_name": "barbaz", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_endpointslice_address_type": "IPv4", @@ -576,6 +645,18 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) { "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + "__meta_kubernetes_endpointslice_address_target_name": "barbaz", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_endpointslice_address_type": "IPv4", @@ -644,6 +725,18 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) { "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + 
"__meta_kubernetes_endpointslice_address_target_name": "barbaz", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_endpointslice_address_type": "IPv4", @@ -728,6 +821,18 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) { "__meta_kubernetes_endpointslice_port_protocol": "TCP", "__meta_kubernetes_endpointslice_port_app_protocol": "http", }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + "__meta_kubernetes_endpointslice_address_target_name": "barbaz", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_endpointslice_address_type": "IPv4", @@ -747,7 +852,8 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) { func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { metadataConfig := AttachMetadataConfig{Node: true} - nodeLabels := map[string]string{"az": "us-east1"} + nodeLabels1 := map[string]string{"az": "us-east1"} + nodeLabels2 := map[string]string{"az": "us-west2"} svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", @@ -757,7 +863,7 @@ func 
TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { }, }, } - objs := []runtime.Object{makeEndpointSliceV1(), makeNode("foobar", "", "", nodeLabels, nil), svc} + objs := []runtime.Object{makeEndpointSliceV1(), makeNode("foobar", "", "", nodeLabels1, nil), makeNode("barbaz", "", "", nodeLabels2, nil), svc} n, _ := makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, metadataConfig, objs...) k8sDiscoveryTest{ @@ -804,6 +910,21 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + "__meta_kubernetes_endpointslice_address_target_name": "barbaz", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_node_label_az": "us-west2", + "__meta_kubernetes_node_labelpresent_az": "true", + "__meta_kubernetes_node_name": "barbaz", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_endpointslice_address_type": "IPv4", @@ -821,7 +942,8 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) { metadataConfig := AttachMetadataConfig{Node: true} - nodeLabels := map[string]string{"az": "us-east1"} + nodeLabels1 := map[string]string{"az": "us-east1"} + nodeLabels2 := map[string]string{"az": "us-west2"} svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", @@ -831,16 +953,17 @@ func 
TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) { }, }, } - node := makeNode("foobar", "", "", nodeLabels, nil) - objs := []runtime.Object{makeEndpointSliceV1(), node, svc} + node1 := makeNode("foobar", "", "", nodeLabels1, nil) + node2 := makeNode("barbaz", "", "", nodeLabels2, nil) + objs := []runtime.Object{makeEndpointSliceV1(), node1, node2, svc} n, c := makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, metadataConfig, objs...) k8sDiscoveryTest{ discovery: n, expectedMaxItems: 2, afterStart: func() { - node.Labels["az"] = "us-central1" - c.CoreV1().Nodes().Update(context.Background(), node, metav1.UpdateOptions{}) + node1.Labels["az"] = "us-central1" + c.CoreV1().Nodes().Update(context.Background(), node1, metav1.UpdateOptions{}) }, expectedRes: map[string]*targetgroup.Group{ "endpointslice/default/testendpoints": { @@ -859,7 +982,7 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) { "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", - "__meta_kubernetes_node_label_az": "us-central1", + "__meta_kubernetes_node_label_az": "us-east1", "__meta_kubernetes_node_labelpresent_az": "true", "__meta_kubernetes_node_name": "foobar", }, @@ -883,6 +1006,21 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) { "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + "__meta_kubernetes_endpointslice_address_target_name": "barbaz", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + 
"__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_node_label_az": "us-west2", + "__meta_kubernetes_node_labelpresent_az": "true", + "__meta_kubernetes_node_name": "barbaz", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_endpointslice_address_type": "IPv4", @@ -1007,6 +1145,18 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) { "__meta_kubernetes_endpointslice_port_protocol": "TCP", "__meta_kubernetes_endpointslice_port_app_protocol": "http", }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + "__meta_kubernetes_endpointslice_address_target_name": "barbaz", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_endpointslice_address_type": "IPv4", @@ -1139,6 +1289,18 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) { "__meta_kubernetes_endpointslice_port_protocol": "TCP", "__meta_kubernetes_endpointslice_port_app_protocol": "http", }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + "__meta_kubernetes_endpointslice_address_target_name": "barbaz", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + 
"__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_endpointslice_address_type": "IPv4", diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go index a44bd513c..e87a1c9b2 100644 --- a/discovery/kubernetes/kubernetes.go +++ b/discovery/kubernetes/kubernetes.go @@ -761,15 +761,21 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share indexers[nodeIndex] = func(obj interface{}) ([]string, error) { e, ok := obj.(*apiv1.Endpoints) if !ok { - return nil, fmt.Errorf("object is not a pod") + return nil, fmt.Errorf("object is not endpoints") } var nodes []string for _, target := range e.Subsets { for _, addr := range target.Addresses { - if addr.NodeName == nil { - continue + if addr.TargetRef != nil { + switch addr.TargetRef.Kind { + case "Pod": + if addr.NodeName != nil { + nodes = append(nodes, *addr.NodeName) + } + case "Node": + nodes = append(nodes, addr.TargetRef.Name) + } } - nodes = append(nodes, *addr.NodeName) } } return nodes, nil @@ -789,17 +795,29 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object switch e := obj.(type) { case *disv1.EndpointSlice: for _, target := range e.Endpoints { - if target.NodeName == nil { - continue + if target.TargetRef != nil { + switch target.TargetRef.Kind { + case "Pod": + if target.NodeName != nil { + nodes = append(nodes, *target.NodeName) + } + case "Node": + nodes = append(nodes, target.TargetRef.Name) + } } - nodes = append(nodes, *target.NodeName) } case *disv1beta1.EndpointSlice: for _, target := range e.Endpoints { - if target.NodeName == nil { - continue + if target.TargetRef != nil { + switch target.TargetRef.Kind { + case "Pod": + if target.NodeName != nil { + nodes = append(nodes, *target.NodeName) + } + case "Node": + nodes = append(nodes, 
target.TargetRef.Name) + } } - nodes = append(nodes, *target.NodeName) } default: return nil, fmt.Errorf("object is not an endpointslice") From 9e500345f39ce310a96650b8500e825012fa579f Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 11 May 2023 01:59:21 +0200 Subject: [PATCH 138/632] textparse/scrape: Add option to scrape both classic and native histograms So far, if a target exposes a histogram with both classic and native buckets, a native-histogram enabled Prometheus would ignore the classic buckets. With the new scrape config option `scrape_classic_histograms` set, both buckets will be ingested, creating all the series of a classic histogram in parallel to the native histogram series. For example, a histogram `foo` would create a native histogram series `foo` and classic series called `foo_sum`, `foo_count`, and `foo_bucket`. This feature can be used in a migration strategy from classic to native histograms, where it is desired to have a transition period during which both native and classic histograms are present. Note that two bugs in classic histogram parsing were found and fixed as a byproduct of testing the new feature: 1. Series created from classic _gauge_ histograms didn't get the _sum/_count/_bucket prefix set. 2. Values of classic _float_ histograms weren't parsed properly. Signed-off-by: beorn7 --- config/config.go | 17 +- docs/configuration/configuration.md | 4 + model/textparse/interface.go | 6 +- model/textparse/interface_test.go | 2 +- model/textparse/protobufparse.go | 49 +- model/textparse/protobufparse_test.go | 1393 ++++++++++++++++++------- promql/fuzz.go | 2 +- scrape/scrape.go | 123 ++- scrape/scrape_test.go | 36 +- web/federate_test.go | 2 +- 10 files changed, 1148 insertions(+), 486 deletions(-) diff --git a/config/config.go b/config/config.go index 1b6a6cf6b..d0ba03ab2 100644 --- a/config/config.go +++ b/config/config.go @@ -146,13 +146,14 @@ var ( // DefaultScrapeConfig is the default scrape configuration. 
DefaultScrapeConfig = ScrapeConfig{ - // ScrapeTimeout and ScrapeInterval default to the - // configured globals. - MetricsPath: "/metrics", - Scheme: "http", - HonorLabels: false, - HonorTimestamps: true, - HTTPClientConfig: config.DefaultHTTPClientConfig, + // ScrapeTimeout and ScrapeInterval default to the configured + // globals. + ScrapeClassicHistograms: false, + MetricsPath: "/metrics", + Scheme: "http", + HonorLabels: false, + HonorTimestamps: true, + HTTPClientConfig: config.DefaultHTTPClientConfig, } // DefaultAlertmanagerConfig is the default alertmanager configuration. @@ -467,6 +468,8 @@ type ScrapeConfig struct { ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"` // The timeout for scraping targets of this config. ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"` + // Whether to scrape a classic histogram that is also exposed as a native histogram. + ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"` // The HTTP resource path on which to fetch metrics from targets. MetricsPath string `yaml:"metrics_path,omitempty"` // The URL scheme with which to fetch metrics from targets. diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 0a8c4a5cd..c74c9d478 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -134,6 +134,10 @@ job_name: # Per-scrape timeout when scraping this job. [ scrape_timeout: | default = ] +# Whether to scrape a classic histogram that is also exposed as a native +# histogram (has no effect without --enable-feature=native-histograms). +[ scrape_classic_histograms: | default = false ] + # The HTTP resource path on which to fetch metrics from targets. 
[ metrics_path: | default = /metrics ] diff --git a/model/textparse/interface.go b/model/textparse/interface.go index 9efd942e8..efa581410 100644 --- a/model/textparse/interface.go +++ b/model/textparse/interface.go @@ -71,7 +71,7 @@ type Parser interface { // // This function always returns a valid parser, but might additionally // return an error if the content type cannot be parsed. -func New(b []byte, contentType string) (Parser, error) { +func New(b []byte, contentType string, parseClassicHistograms bool) (Parser, error) { if contentType == "" { return NewPromParser(b), nil } @@ -84,7 +84,7 @@ func New(b []byte, contentType string) (Parser, error) { case "application/openmetrics-text": return NewOpenMetricsParser(b), nil case "application/vnd.google.protobuf": - return NewProtobufParser(b), nil + return NewProtobufParser(b, parseClassicHistograms), nil default: return NewPromParser(b), nil } @@ -100,7 +100,7 @@ const ( EntrySeries Entry = 2 // A series with a simple float64 as value. EntryComment Entry = 3 EntryUnit Entry = 4 - EntryHistogram Entry = 5 // A series with a sparse histogram as a value. + EntryHistogram Entry = 5 // A series with a native histogram as a value. ) // MetricType represents metric type values. diff --git a/model/textparse/interface_test.go b/model/textparse/interface_test.go index d94467d4d..de140d681 100644 --- a/model/textparse/interface_test.go +++ b/model/textparse/interface_test.go @@ -91,7 +91,7 @@ func TestNewParser(t *testing.T) { tt := tt // Copy to local variable before going parallel. 
t.Parallel() - p, err := New([]byte{}, tt.contentType) + p, err := New([]byte{}, tt.contentType, false) tt.validateParser(t, p) if tt.err == "" { require.NoError(t, err) diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go index eca145955..b831251ad 100644 --- a/model/textparse/protobufparse.go +++ b/model/textparse/protobufparse.go @@ -52,8 +52,10 @@ type ProtobufParser struct { // fieldPos is the position within a Summary or (legacy) Histogram. -2 // is the count. -1 is the sum. Otherwise it is the index within // quantiles/buckets. - fieldPos int - fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed. + fieldPos int + fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed. + redoClassic bool // true after parsing a native histogram if we need to parse it again as a classit histogram. + // state is marked by the entry we are processing. EntryInvalid implies // that we have to decode the next MetricFamily. state Entry @@ -62,17 +64,22 @@ type ProtobufParser struct { mf *dto.MetricFamily + // Wether to also parse a classic histogram that is also present as a + // native histogram. + parseClassicHistograms bool + // The following are just shenanigans to satisfy the Parser interface. metricBytes *bytes.Buffer // A somewhat fluid representation of the current metric. } // NewProtobufParser returns a parser for the payload in the byte slice. 
-func NewProtobufParser(b []byte) Parser { +func NewProtobufParser(b []byte, parseClassicHistograms bool) Parser { return &ProtobufParser{ - in: b, - state: EntryInvalid, - mf: &dto.MetricFamily{}, - metricBytes: &bytes.Buffer{}, + in: b, + state: EntryInvalid, + mf: &dto.MetricFamily{}, + metricBytes: &bytes.Buffer{}, + parseClassicHistograms: parseClassicHistograms, } } @@ -106,19 +113,28 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) { v = s.GetQuantile()[p.fieldPos].GetValue() } case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: - // This should only happen for a legacy histogram. + // This should only happen for a classic histogram. h := m.GetHistogram() switch p.fieldPos { case -2: - v = float64(h.GetSampleCount()) + v = h.GetSampleCountFloat() + if v == 0 { + v = float64(h.GetSampleCount()) + } case -1: v = h.GetSampleSum() default: bb := h.GetBucket() if p.fieldPos >= len(bb) { - v = float64(h.GetSampleCount()) + v = h.GetSampleCountFloat() + if v == 0 { + v = float64(h.GetSampleCount()) + } } else { - v = float64(bb[p.fieldPos].GetCumulativeCount()) + v = bb[p.fieldPos].GetCumulativeCountFloat() + if v == 0 { + v = float64(bb[p.fieldPos].GetCumulativeCount()) + } } } default: @@ -149,6 +165,9 @@ func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *his ts = m.GetTimestampMs() h = m.GetHistogram() ) + if p.parseClassicHistograms && len(h.GetBucket()) > 0 { + p.redoClassic = true + } if h.GetSampleCountFloat() > 0 || h.GetZeroCountFloat() > 0 { // It is a float histogram. 
fh := histogram.FloatHistogram{ @@ -376,6 +395,12 @@ func (p *ProtobufParser) Next() (Entry, error) { return EntryInvalid, err } case EntryHistogram, EntrySeries: + if p.redoClassic { + p.redoClassic = false + p.state = EntrySeries + p.fieldPos = -3 + p.fieldsDone = false + } t := p.mf.GetType() if p.state == EntrySeries && !p.fieldsDone && (t == dto.MetricType_SUMMARY || @@ -432,7 +457,7 @@ func (p *ProtobufParser) updateMetricBytes() error { // state. func (p *ProtobufParser) getMagicName() string { t := p.mf.GetType() - if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_SUMMARY) { + if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_GAUGE_HISTOGRAM && t != dto.MetricType_SUMMARY) { return p.mf.GetName() } if p.fieldPos == -2 { diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go index 90c6a90f3..882cce59d 100644 --- a/model/textparse/protobufparse_test.go +++ b/model/textparse/protobufparse_test.go @@ -30,8 +30,8 @@ import ( dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" ) -func TestProtobufParse(t *testing.T) { - textMetricFamilies := []string{ +func createTestProtoBuf(t *testing.T) *bytes.Buffer { + testMetricFamilies := []string{ `name: "go_build_info" help: "Build information about the main Go module." type: GAUGE @@ -231,8 +231,7 @@ help: "Test float histogram with many buckets removed to keep it manageable in s type: HISTOGRAM metric: < histogram: < - sample_count: 175 - sample_count_float: 175.0 + sample_count_float: 175.0 sample_sum: 0.0008280461746287094 bucket: < cumulative_count_float: 2.0 @@ -302,8 +301,7 @@ help: "Like test_float_histogram but as gauge histogram." 
type: GAUGE_HISTOGRAM metric: < histogram: < - sample_count: 175 - sample_count_float: 175.0 + sample_count_float: 175.0 sample_sum: 0.0008280461746287094 bucket: < cumulative_count_float: 2.0 @@ -450,9 +448,9 @@ metric: < } varintBuf := make([]byte, binary.MaxVarintLen32) - inputBuf := &bytes.Buffer{} + buf := &bytes.Buffer{} - for _, tmf := range textMetricFamilies { + for _, tmf := range testMetricFamilies { pb := &dto.MetricFamily{} // From text to proto message. require.NoError(t, proto.UnmarshalText(tmf, pb)) @@ -462,11 +460,15 @@ metric: < // Write first length, then binary protobuf. varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf))) - inputBuf.Write(varintBuf[:varintLength]) - inputBuf.Write(protoBuf) + buf.Write(varintBuf[:varintLength]) + buf.Write(protoBuf) } - exp := []struct { + return buf +} + +func TestProtobufParse(t *testing.T) { + type parseResult struct { lset labels.Labels m string t int64 @@ -478,417 +480,1006 @@ metric: < shs *histogram.Histogram fhs *histogram.FloatHistogram e []exemplar.Exemplar + } + + inputBuf := createTestProtoBuf(t) + + scenarios := []struct { + name string + parser Parser + expected []parseResult }{ { - m: "go_build_info", - help: "Build information about the main Go module.", - }, - { - m: "go_build_info", - typ: MetricTypeGauge, - }, - { - m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)", - v: 1, - lset: labels.FromStrings( - "__name__", "go_build_info", - "checksum", "", - "path", "github.com/prometheus/client_golang", - "version", "(devel)", - ), - }, - { - m: "go_memstats_alloc_bytes_total", - help: "Total number of bytes allocated, even if freed.", - }, - { - m: "go_memstats_alloc_bytes_total", - typ: MetricTypeCounter, - }, - { - m: "go_memstats_alloc_bytes_total", - v: 1.546544e+06, - lset: labels.FromStrings( - "__name__", "go_memstats_alloc_bytes_total", - ), - e: []exemplar.Exemplar{ - {Labels: labels.FromStrings("dummyID", "42"), Value: 
12, HasTs: true, Ts: 1625851151233}, - }, - }, - { - m: "something_untyped", - help: "Just to test the untyped type.", - }, - { - m: "something_untyped", - typ: MetricTypeUnknown, - }, - { - m: "something_untyped", - t: 1234567, - v: 42, - lset: labels.FromStrings( - "__name__", "something_untyped", - ), - }, - { - m: "test_histogram", - help: "Test histogram with many buckets removed to keep it manageable in size.", - }, - { - m: "test_histogram", - typ: MetricTypeHistogram, - }, - { - m: "test_histogram", - t: 1234568, - shs: &histogram.Histogram{ - Count: 175, - ZeroCount: 2, - Sum: 0.0008280461746287094, - ZeroThreshold: 2.938735877055719e-39, - Schema: 3, - PositiveSpans: []histogram.Span{ - {Offset: -161, Length: 1}, - {Offset: 8, Length: 3}, + name: "ignore classic buckets of native histograms", + parser: NewProtobufParser(inputBuf.Bytes(), false), + expected: []parseResult{ + { + m: "go_build_info", + help: "Build information about the main Go module.", }, - NegativeSpans: []histogram.Span{ - {Offset: -162, Length: 1}, - {Offset: 23, Length: 4}, + { + m: "go_build_info", + typ: MetricTypeGauge, }, - PositiveBuckets: []int64{1, 2, -1, -1}, - NegativeBuckets: []int64{1, 3, -2, -1, 1}, - }, - lset: labels.FromStrings( - "__name__", "test_histogram", - ), - e: []exemplar.Exemplar{ - {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, - }, - }, - { - m: "test_gauge_histogram", - help: "Like test_histogram but as gauge histogram.", - }, - { - m: "test_gauge_histogram", - typ: MetricTypeGaugeHistogram, - }, - { - m: "test_gauge_histogram", - t: 1234568, - shs: &histogram.Histogram{ - CounterResetHint: histogram.GaugeType, - Count: 175, - ZeroCount: 2, - Sum: 0.0008280461746287094, - ZeroThreshold: 2.938735877055719e-39, - Schema: 3, - PositiveSpans: []histogram.Span{ - {Offset: -161, Length: 1}, - {Offset: 8, Length: 3}, + { + m: 
"go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)", + v: 1, + lset: labels.FromStrings( + "__name__", "go_build_info", + "checksum", "", + "path", "github.com/prometheus/client_golang", + "version", "(devel)", + ), }, - NegativeSpans: []histogram.Span{ - {Offset: -162, Length: 1}, - {Offset: 23, Length: 4}, + { + m: "go_memstats_alloc_bytes_total", + help: "Total number of bytes allocated, even if freed.", }, - PositiveBuckets: []int64{1, 2, -1, -1}, - NegativeBuckets: []int64{1, 3, -2, -1, 1}, - }, - lset: labels.FromStrings( - "__name__", "test_gauge_histogram", - ), - e: []exemplar.Exemplar{ - {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, - }, - }, - { - m: "test_float_histogram", - help: "Test float histogram with many buckets removed to keep it manageable in size.", - }, - { - m: "test_float_histogram", - typ: MetricTypeHistogram, - }, - { - m: "test_float_histogram", - t: 1234568, - fhs: &histogram.FloatHistogram{ - Count: 175.0, - ZeroCount: 2.0, - Sum: 0.0008280461746287094, - ZeroThreshold: 2.938735877055719e-39, - Schema: 3, - PositiveSpans: []histogram.Span{ - {Offset: -161, Length: 1}, - {Offset: 8, Length: 3}, + { + m: "go_memstats_alloc_bytes_total", + typ: MetricTypeCounter, }, - NegativeSpans: []histogram.Span{ - {Offset: -162, Length: 1}, - {Offset: 23, Length: 4}, + { + m: "go_memstats_alloc_bytes_total", + v: 1.546544e+06, + lset: labels.FromStrings( + "__name__", "go_memstats_alloc_bytes_total", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "42"), Value: 12, HasTs: true, Ts: 1625851151233}, + }, }, - PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0}, - NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0}, - }, - lset: labels.FromStrings( - "__name__", "test_float_histogram", - ), - e: []exemplar.Exemplar{ - {Labels: 
labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, - }, - }, - { - m: "test_gauge_float_histogram", - help: "Like test_float_histogram but as gauge histogram.", - }, - { - m: "test_gauge_float_histogram", - typ: MetricTypeGaugeHistogram, - }, - { - m: "test_gauge_float_histogram", - t: 1234568, - fhs: &histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Count: 175.0, - ZeroCount: 2.0, - Sum: 0.0008280461746287094, - ZeroThreshold: 2.938735877055719e-39, - Schema: 3, - PositiveSpans: []histogram.Span{ - {Offset: -161, Length: 1}, - {Offset: 8, Length: 3}, + { + m: "something_untyped", + help: "Just to test the untyped type.", }, - NegativeSpans: []histogram.Span{ - {Offset: -162, Length: 1}, - {Offset: 23, Length: 4}, + { + m: "something_untyped", + typ: MetricTypeUnknown, + }, + { + m: "something_untyped", + t: 1234567, + v: 42, + lset: labels.FromStrings( + "__name__", "something_untyped", + ), + }, + { + m: "test_histogram", + help: "Test histogram with many buckets removed to keep it manageable in size.", + }, + { + m: "test_histogram", + typ: MetricTypeHistogram, + }, + { + m: "test_histogram", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { + m: 
"test_gauge_histogram", + help: "Like test_histogram but as gauge histogram.", + }, + { + m: "test_gauge_histogram", + typ: MetricTypeGaugeHistogram, + }, + { + m: "test_gauge_histogram", + t: 1234568, + shs: &histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { + m: "test_float_histogram", + help: "Test float histogram with many buckets removed to keep it manageable in size.", + }, + { + m: "test_float_histogram", + typ: MetricTypeHistogram, + }, + { + m: "test_float_histogram", + t: 1234568, + fhs: &histogram.FloatHistogram{ + Count: 175.0, + ZeroCount: 2.0, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0}, + NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0}, + }, + lset: labels.FromStrings( + "__name__", "test_float_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { + m: "test_gauge_float_histogram", + help: "Like 
test_float_histogram but as gauge histogram.", + }, + { + m: "test_gauge_float_histogram", + typ: MetricTypeGaugeHistogram, + }, + { + m: "test_gauge_float_histogram", + t: 1234568, + fhs: &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Count: 175.0, + ZeroCount: 2.0, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0}, + NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0}, + }, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { + m: "test_histogram2", + help: "Similar histogram as before but now without sparse buckets.", + }, + { + m: "test_histogram2", + typ: MetricTypeHistogram, + }, + { + m: "test_histogram2_count", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram2_count", + ), + }, + { + m: "test_histogram2_sum", + v: 0.000828, + lset: labels.FromStrings( + "__name__", "test_histogram2_sum", + ), + }, + { + m: "test_histogram2_bucket\xffle\xff-0.00048", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "-0.00048", + ), + }, + { + m: "test_histogram2_bucket\xffle\xff-0.00038", + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "-0.00038", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146}, + }, + }, + { + m: "test_histogram2_bucket\xffle\xff1.0", + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "1.0", + ), + e: 
[]exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false}, + }, + }, + { + m: "test_histogram2_bucket\xffle\xff+Inf", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "+Inf", + ), + }, + { + m: "rpc_durations_seconds", + help: "RPC latency distributions.", + }, + { + m: "rpc_durations_seconds", + typ: MetricTypeSummary, + }, + { + m: "rpc_durations_seconds_count\xffservice\xffexponential", + v: 262, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds_count", + "service", "exponential", + ), + }, + { + m: "rpc_durations_seconds_sum\xffservice\xffexponential", + v: 0.00025551262820703587, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds_sum", + "service", "exponential", + ), + }, + { + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.5", + v: 6.442786329648548e-07, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.5", + "service", "exponential", + ), + }, + { + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.9", + v: 1.9435742936658396e-06, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.9", + "service", "exponential", + ), + }, + { + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.99", + v: 4.0471608667037015e-06, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.99", + "service", "exponential", + ), + }, + { + m: "without_quantiles", + help: "A summary without quantiles.", + }, + { + m: "without_quantiles", + typ: MetricTypeSummary, + }, + { + m: "without_quantiles_count", + v: 42, + lset: labels.FromStrings( + "__name__", "without_quantiles_count", + ), + }, + { + m: "without_quantiles_sum", + v: 1.234, + lset: labels.FromStrings( + "__name__", "without_quantiles_sum", + ), }, - PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0}, - NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0}, - }, - 
lset: labels.FromStrings( - "__name__", "test_gauge_float_histogram", - ), - e: []exemplar.Exemplar{ - {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, { - m: "test_histogram2", - help: "Similar histogram as before but now without sparse buckets.", - }, - { - m: "test_histogram2", - typ: MetricTypeHistogram, - }, - { - m: "test_histogram2_count", - v: 175, - lset: labels.FromStrings( - "__name__", "test_histogram2_count", - ), - }, - { - m: "test_histogram2_sum", - v: 0.000828, - lset: labels.FromStrings( - "__name__", "test_histogram2_sum", - ), - }, - { - m: "test_histogram2_bucket\xffle\xff-0.00048", - v: 2, - lset: labels.FromStrings( - "__name__", "test_histogram2_bucket", - "le", "-0.00048", - ), - }, - { - m: "test_histogram2_bucket\xffle\xff-0.00038", - v: 4, - lset: labels.FromStrings( - "__name__", "test_histogram2_bucket", - "le", "-0.00038", - ), - e: []exemplar.Exemplar{ - {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146}, + name: "parse classic and native buckets", + parser: NewProtobufParser(inputBuf.Bytes(), true), + expected: []parseResult{ + { // 0 + m: "go_build_info", + help: "Build information about the main Go module.", + }, + { // 1 + m: "go_build_info", + typ: MetricTypeGauge, + }, + { // 2 + m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)", + v: 1, + lset: labels.FromStrings( + "__name__", "go_build_info", + "checksum", "", + "path", "github.com/prometheus/client_golang", + "version", "(devel)", + ), + }, + { // 3 + m: "go_memstats_alloc_bytes_total", + help: "Total number of bytes allocated, even if freed.", + }, + { // 4 + m: "go_memstats_alloc_bytes_total", + typ: MetricTypeCounter, + }, + { // 5 + m: "go_memstats_alloc_bytes_total", + v: 1.546544e+06, + lset: labels.FromStrings( + "__name__", 
"go_memstats_alloc_bytes_total", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "42"), Value: 12, HasTs: true, Ts: 1625851151233}, + }, + }, + { // 6 + m: "something_untyped", + help: "Just to test the untyped type.", + }, + { // 7 + m: "something_untyped", + typ: MetricTypeUnknown, + }, + { // 8 + m: "something_untyped", + t: 1234567, + v: 42, + lset: labels.FromStrings( + "__name__", "something_untyped", + ), + }, + { // 9 + m: "test_histogram", + help: "Test histogram with many buckets removed to keep it manageable in size.", + }, + { // 10 + m: "test_histogram", + typ: MetricTypeHistogram, + }, + { // 11 + m: "test_histogram", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 12 + m: "test_histogram_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram_count", + ), + }, + { // 13 + m: "test_histogram_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_histogram_sum", + ), + }, + { // 14 + m: "test_histogram_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 15 + m: "test_histogram_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: 
labels.FromStrings( + "__name__", "test_histogram_bucket", + "le", "-0.0003899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 16 + m: "test_histogram_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram_bucket", + "le", "-0.0002899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 17 + m: "test_histogram_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram_bucket", + "le", "+Inf", + ), + }, + { // 18 + m: "test_gauge_histogram", + help: "Like test_histogram but as gauge histogram.", + }, + { // 19 + m: "test_gauge_histogram", + typ: MetricTypeGaugeHistogram, + }, + { // 20 + m: "test_gauge_histogram", + t: 1234568, + shs: &histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 21 + m: "test_gauge_histogram_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_count", + ), + }, + { // 22 + m: "test_gauge_histogram_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", 
"test_gauge_histogram_sum", + ), + }, + { // 23 + m: "test_gauge_histogram_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 24 + m: "test_gauge_histogram_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_bucket", + "le", "-0.0003899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 25 + m: "test_gauge_histogram_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_bucket", + "le", "-0.0002899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 26 + m: "test_gauge_histogram_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_bucket", + "le", "+Inf", + ), + }, + { // 27 + m: "test_float_histogram", + help: "Test float histogram with many buckets removed to keep it manageable in size.", + }, + { // 28 + m: "test_float_histogram", + typ: MetricTypeHistogram, + }, + { // 29 + m: "test_float_histogram", + t: 1234568, + fhs: &histogram.FloatHistogram{ + Count: 175.0, + ZeroCount: 2.0, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0}, + NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0}, + }, + lset: labels.FromStrings( + "__name__", "test_float_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, 
HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 30 + m: "test_float_histogram_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_float_histogram_count", + ), + }, + { // 31 + m: "test_float_histogram_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_float_histogram_sum", + ), + }, + { // 32 + m: "test_float_histogram_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_float_histogram_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 33 + m: "test_float_histogram_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: labels.FromStrings( + "__name__", "test_float_histogram_bucket", + "le", "-0.0003899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 34 + m: "test_float_histogram_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", "test_float_histogram_bucket", + "le", "-0.0002899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 35 + m: "test_float_histogram_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_float_histogram_bucket", + "le", "+Inf", + ), + }, + { // 36 + m: "test_gauge_float_histogram", + help: "Like test_float_histogram but as gauge histogram.", + }, + { // 37 + m: "test_gauge_float_histogram", + typ: MetricTypeGaugeHistogram, + }, + { // 38 + m: "test_gauge_float_histogram", + t: 1234568, + fhs: &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Count: 175.0, + ZeroCount: 2.0, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: 
[]histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0}, + NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0}, + }, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 39 + m: "test_gauge_float_histogram_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_count", + ), + }, + { // 40 + m: "test_gauge_float_histogram_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_sum", + ), + }, + { // 41 + m: "test_gauge_float_histogram_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 42 + m: "test_gauge_float_histogram_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_bucket", + "le", "-0.0003899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 43 + m: "test_gauge_float_histogram_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_bucket", + "le", "-0.0002899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 44 + m: "test_gauge_float_histogram_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", 
"test_gauge_float_histogram_bucket", + "le", "+Inf", + ), + }, + { // 45 + m: "test_histogram2", + help: "Similar histogram as before but now without sparse buckets.", + }, + { // 46 + m: "test_histogram2", + typ: MetricTypeHistogram, + }, + { // 47 + m: "test_histogram2_count", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram2_count", + ), + }, + { // 48 + m: "test_histogram2_sum", + v: 0.000828, + lset: labels.FromStrings( + "__name__", "test_histogram2_sum", + ), + }, + { // 49 + m: "test_histogram2_bucket\xffle\xff-0.00048", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "-0.00048", + ), + }, + { // 50 + m: "test_histogram2_bucket\xffle\xff-0.00038", + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "-0.00038", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146}, + }, + }, + { // 51 + m: "test_histogram2_bucket\xffle\xff1.0", + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "1.0", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false}, + }, + }, + { // 52 + m: "test_histogram2_bucket\xffle\xff+Inf", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "+Inf", + ), + }, + { // 53 + m: "rpc_durations_seconds", + help: "RPC latency distributions.", + }, + { // 54 + m: "rpc_durations_seconds", + typ: MetricTypeSummary, + }, + { // 55 + m: "rpc_durations_seconds_count\xffservice\xffexponential", + v: 262, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds_count", + "service", "exponential", + ), + }, + { // 56 + m: "rpc_durations_seconds_sum\xffservice\xffexponential", + v: 0.00025551262820703587, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds_sum", + "service", "exponential", + ), + }, + { // 57 + m: 
"rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.5", + v: 6.442786329648548e-07, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.5", + "service", "exponential", + ), + }, + { // 58 + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.9", + v: 1.9435742936658396e-06, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.9", + "service", "exponential", + ), + }, + { // 59 + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.99", + v: 4.0471608667037015e-06, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.99", + "service", "exponential", + ), + }, + { // 60 + m: "without_quantiles", + help: "A summary without quantiles.", + }, + { // 61 + m: "without_quantiles", + typ: MetricTypeSummary, + }, + { // 62 + m: "without_quantiles_count", + v: 42, + lset: labels.FromStrings( + "__name__", "without_quantiles_count", + ), + }, + { // 63 + m: "without_quantiles_sum", + v: 1.234, + lset: labels.FromStrings( + "__name__", "without_quantiles_sum", + ), + }, }, }, - { - m: "test_histogram2_bucket\xffle\xff1.0", - v: 16, - lset: labels.FromStrings( - "__name__", "test_histogram2_bucket", - "le", "1.0", - ), - e: []exemplar.Exemplar{ - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false}, - }, - }, - { - m: "test_histogram2_bucket\xffle\xff+Inf", - v: 175, - lset: labels.FromStrings( - "__name__", "test_histogram2_bucket", - "le", "+Inf", - ), - }, - { - m: "rpc_durations_seconds", - help: "RPC latency distributions.", - }, - { - m: "rpc_durations_seconds", - typ: MetricTypeSummary, - }, - { - m: "rpc_durations_seconds_count\xffservice\xffexponential", - v: 262, - lset: labels.FromStrings( - "__name__", "rpc_durations_seconds_count", - "service", "exponential", - ), - }, - { - m: "rpc_durations_seconds_sum\xffservice\xffexponential", - v: 0.00025551262820703587, - lset: labels.FromStrings( - "__name__", 
"rpc_durations_seconds_sum", - "service", "exponential", - ), - }, - { - m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.5", - v: 6.442786329648548e-07, - lset: labels.FromStrings( - "__name__", "rpc_durations_seconds", - "quantile", "0.5", - "service", "exponential", - ), - }, - { - m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.9", - v: 1.9435742936658396e-06, - lset: labels.FromStrings( - "__name__", "rpc_durations_seconds", - "quantile", "0.9", - "service", "exponential", - ), - }, - { - m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.99", - v: 4.0471608667037015e-06, - lset: labels.FromStrings( - "__name__", "rpc_durations_seconds", - "quantile", "0.99", - "service", "exponential", - ), - }, - { - m: "without_quantiles", - help: "A summary without quantiles.", - }, - { - m: "without_quantiles", - typ: MetricTypeSummary, - }, - { - m: "without_quantiles_count", - v: 42, - lset: labels.FromStrings( - "__name__", "without_quantiles_count", - ), - }, - { - m: "without_quantiles_sum", - v: 1.234, - lset: labels.FromStrings( - "__name__", "without_quantiles_sum", - ), - }, } - p := NewProtobufParser(inputBuf.Bytes()) - i := 0 + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + var ( + i int + res labels.Labels + p = scenario.parser + exp = scenario.expected + ) - var res labels.Labels + for { + et, err := p.Next() + if errors.Is(err, io.EOF) { + break + } + require.NoError(t, err) - for { - et, err := p.Next() - if errors.Is(err, io.EOF) { - break - } - require.NoError(t, err) + switch et { + case EntrySeries: + m, ts, v := p.Series() - switch et { - case EntrySeries: - m, ts, v := p.Series() + var e exemplar.Exemplar + p.Metric(&res) + found := p.Exemplar(&e) + require.Equal(t, exp[i].m, string(m)) + if ts != nil { + require.Equal(t, exp[i].t, *ts) + } else { + require.Equal(t, exp[i].t, int64(0)) + } + require.Equal(t, exp[i].v, v) + require.Equal(t, exp[i].lset, res) 
+ if len(exp[i].e) == 0 { + require.Equal(t, false, found) + } else { + require.Equal(t, true, found) + require.Equal(t, exp[i].e[0], e) + } - var e exemplar.Exemplar - p.Metric(&res) - found := p.Exemplar(&e) - require.Equal(t, exp[i].m, string(m)) - if ts != nil { - require.Equal(t, exp[i].t, *ts) - } else { - require.Equal(t, exp[i].t, int64(0)) + case EntryHistogram: + m, ts, shs, fhs := p.Histogram() + p.Metric(&res) + require.Equal(t, exp[i].m, string(m)) + if ts != nil { + require.Equal(t, exp[i].t, *ts) + } else { + require.Equal(t, exp[i].t, int64(0)) + } + require.Equal(t, exp[i].lset, res) + require.Equal(t, exp[i].m, string(m)) + if shs != nil { + require.Equal(t, exp[i].shs, shs) + } else { + require.Equal(t, exp[i].fhs, fhs) + } + j := 0 + for e := (exemplar.Exemplar{}); p.Exemplar(&e); j++ { + require.Equal(t, exp[i].e[j], e) + e = exemplar.Exemplar{} + } + require.Equal(t, len(exp[i].e), j, "not enough exemplars found") + + case EntryType: + m, typ := p.Type() + require.Equal(t, exp[i].m, string(m)) + require.Equal(t, exp[i].typ, typ) + + case EntryHelp: + m, h := p.Help() + require.Equal(t, exp[i].m, string(m)) + require.Equal(t, exp[i].help, string(h)) + + case EntryUnit: + m, u := p.Unit() + require.Equal(t, exp[i].m, string(m)) + require.Equal(t, exp[i].unit, string(u)) + + case EntryComment: + require.Equal(t, exp[i].comment, string(p.Comment())) + } + + i++ } - require.Equal(t, exp[i].v, v) - require.Equal(t, exp[i].lset, res) - if len(exp[i].e) == 0 { - require.Equal(t, false, found) - } else { - require.Equal(t, true, found) - require.Equal(t, exp[i].e[0], e) - } - - case EntryHistogram: - m, ts, shs, fhs := p.Histogram() - p.Metric(&res) - require.Equal(t, exp[i].m, string(m)) - if ts != nil { - require.Equal(t, exp[i].t, *ts) - } else { - require.Equal(t, exp[i].t, int64(0)) - } - require.Equal(t, exp[i].lset, res) - require.Equal(t, exp[i].m, string(m)) - if shs != nil { - require.Equal(t, exp[i].shs, shs) - } else { - require.Equal(t, 
exp[i].fhs, fhs) - } - j := 0 - for e := (exemplar.Exemplar{}); p.Exemplar(&e); j++ { - require.Equal(t, exp[i].e[j], e) - e = exemplar.Exemplar{} - } - require.Equal(t, len(exp[i].e), j, "not enough exemplars found") - - case EntryType: - m, typ := p.Type() - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].typ, typ) - - case EntryHelp: - m, h := p.Help() - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].help, string(h)) - - case EntryUnit: - m, u := p.Unit() - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].unit, string(u)) - - case EntryComment: - require.Equal(t, exp[i].comment, string(p.Comment())) - } - - i++ + require.Equal(t, len(exp), i) + }) } - require.Equal(t, len(exp), i) } diff --git a/promql/fuzz.go b/promql/fuzz.go index 39933378e..aff6eb15b 100644 --- a/promql/fuzz.go +++ b/promql/fuzz.go @@ -58,7 +58,7 @@ const ( ) func fuzzParseMetricWithContentType(in []byte, contentType string) int { - p, warning := textparse.New(in, contentType) + p, warning := textparse.New(in, contentType, false) if warning != nil { // An invalid content type is being passed, which should not happen // in this context. 
diff --git a/scrape/scrape.go b/scrape/scrape.go index f094ee825..a97cbf539 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -260,17 +260,18 @@ type labelLimits struct { } type scrapeLoopOptions struct { - target *Target - scraper scraper - sampleLimit int - bucketLimit int - labelLimits *labelLimits - honorLabels bool - honorTimestamps bool - interval time.Duration - timeout time.Duration - mrc []*relabel.Config - cache *scrapeCache + target *Target + scraper scraper + sampleLimit int + bucketLimit int + labelLimits *labelLimits + honorLabels bool + honorTimestamps bool + interval time.Duration + timeout time.Duration + scrapeClassicHistograms bool + mrc []*relabel.Config + cache *scrapeCache } const maxAheadTime = 10 * time.Minute @@ -331,6 +332,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed opts.labelLimits, opts.interval, opts.timeout, + opts.scrapeClassicHistograms, options.ExtraMetrics, options.EnableMetadataStorage, opts.target, @@ -547,9 +549,10 @@ func (sp *scrapePool) sync(targets []*Target) { labelNameLengthLimit: int(sp.config.LabelNameLengthLimit), labelValueLengthLimit: int(sp.config.LabelValueLengthLimit), } - honorLabels = sp.config.HonorLabels - honorTimestamps = sp.config.HonorTimestamps - mrc = sp.config.MetricRelabelConfigs + honorLabels = sp.config.HonorLabels + honorTimestamps = sp.config.HonorTimestamps + mrc = sp.config.MetricRelabelConfigs + scrapeClassicHistograms = sp.config.ScrapeClassicHistograms ) sp.targetMtx.Lock() @@ -568,16 +571,17 @@ func (sp *scrapePool) sync(targets []*Target) { } s := &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit, acceptHeader: acceptHeader} l := sp.newLoop(scrapeLoopOptions{ - target: t, - scraper: s, - sampleLimit: sampleLimit, - bucketLimit: bucketLimit, - labelLimits: labelLimits, - honorLabels: honorLabels, - honorTimestamps: honorTimestamps, - mrc: mrc, - interval: interval, - timeout: timeout, + target: t, + 
scraper: s, + sampleLimit: sampleLimit, + bucketLimit: bucketLimit, + labelLimits: labelLimits, + honorLabels: honorLabels, + honorTimestamps: honorTimestamps, + mrc: mrc, + interval: interval, + timeout: timeout, + scrapeClassicHistograms: scrapeClassicHistograms, }) if err != nil { l.setForcedError(err) @@ -882,20 +886,21 @@ type cacheEntry struct { } type scrapeLoop struct { - scraper scraper - l log.Logger - cache *scrapeCache - lastScrapeSize int - buffers *pool.Pool - jitterSeed uint64 - honorTimestamps bool - forcedErr error - forcedErrMtx sync.Mutex - sampleLimit int - bucketLimit int - labelLimits *labelLimits - interval time.Duration - timeout time.Duration + scraper scraper + l log.Logger + cache *scrapeCache + lastScrapeSize int + buffers *pool.Pool + jitterSeed uint64 + honorTimestamps bool + forcedErr error + forcedErrMtx sync.Mutex + sampleLimit int + bucketLimit int + labelLimits *labelLimits + interval time.Duration + timeout time.Duration + scrapeClassicHistograms bool appender func(ctx context.Context) storage.Appender sampleMutator labelsMutator @@ -1177,6 +1182,7 @@ func newScrapeLoop(ctx context.Context, labelLimits *labelLimits, interval time.Duration, timeout time.Duration, + scrapeClassicHistograms bool, reportExtraMetrics bool, appendMetadataToWAL bool, target *Target, @@ -1204,25 +1210,26 @@ func newScrapeLoop(ctx context.Context, } sl := &scrapeLoop{ - scraper: sc, - buffers: buffers, - cache: cache, - appender: appender, - sampleMutator: sampleMutator, - reportSampleMutator: reportSampleMutator, - stopped: make(chan struct{}), - jitterSeed: jitterSeed, - l: l, - parentCtx: ctx, - appenderCtx: appenderCtx, - honorTimestamps: honorTimestamps, - sampleLimit: sampleLimit, - bucketLimit: bucketLimit, - labelLimits: labelLimits, - interval: interval, - timeout: timeout, - reportExtraMetrics: reportExtraMetrics, - appendMetadataToWAL: appendMetadataToWAL, + scraper: sc, + buffers: buffers, + cache: cache, + appender: appender, + sampleMutator: 
sampleMutator, + reportSampleMutator: reportSampleMutator, + stopped: make(chan struct{}), + jitterSeed: jitterSeed, + l: l, + parentCtx: ctx, + appenderCtx: appenderCtx, + honorTimestamps: honorTimestamps, + sampleLimit: sampleLimit, + bucketLimit: bucketLimit, + labelLimits: labelLimits, + interval: interval, + timeout: timeout, + scrapeClassicHistograms: scrapeClassicHistograms, + reportExtraMetrics: reportExtraMetrics, + appendMetadataToWAL: appendMetadataToWAL, } sl.ctx, sl.cancel = context.WithCancel(ctx) @@ -1492,7 +1499,7 @@ type appendErrors struct { } func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) { - p, err := textparse.New(b, contentType) + p, err := textparse.New(b, contentType, sl.scrapeClassicHistograms) if err != nil { level.Debug(sl.l).Log( "msg", "Invalid content type on scrape, using prometheus parser as fallback.", diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 7f6dea657..6c45c26b4 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -633,6 +633,7 @@ func TestScrapeLoopStopBeforeRun(t *testing.T) { 0, false, false, + false, nil, false, ) @@ -705,6 +706,7 @@ func TestScrapeLoopStop(t *testing.T) { time.Hour, false, false, + false, nil, false, ) @@ -781,6 +783,7 @@ func TestScrapeLoopRun(t *testing.T) { time.Hour, false, false, + false, nil, false, ) @@ -836,6 +839,7 @@ func TestScrapeLoopRun(t *testing.T) { 100*time.Millisecond, false, false, + false, nil, false, ) @@ -895,6 +899,7 @@ func TestScrapeLoopForcedErr(t *testing.T) { time.Hour, false, false, + false, nil, false, ) @@ -953,6 +958,7 @@ func TestScrapeLoopMetadata(t *testing.T) { 0, false, false, + false, nil, false, ) @@ -1010,6 +1016,7 @@ func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) { 0, false, false, + false, nil, false, ) @@ -1070,6 +1077,7 @@ func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) { 0, false, false, + false, 
nil, false, ) @@ -1148,6 +1156,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) { time.Hour, false, false, + false, nil, false, ) @@ -1211,6 +1220,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) { time.Hour, false, false, + false, nil, false, ) @@ -1277,6 +1287,7 @@ func TestScrapeLoopCache(t *testing.T) { time.Hour, false, false, + false, nil, false, ) @@ -1360,6 +1371,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) { time.Hour, false, false, + false, nil, false, ) @@ -1474,6 +1486,7 @@ func TestScrapeLoopAppend(t *testing.T) { 0, false, false, + false, nil, false, ) @@ -1563,7 +1576,7 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) { return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil) }, nil, - func(ctx context.Context) storage.Appender { return app }, nil, 0, true, 0, 0, nil, 0, 0, false, false, nil, false, + func(ctx context.Context) storage.Appender { return app }, nil, 0, true, 0, 0, nil, 0, 0, false, false, false, nil, false, ) slApp := sl.appender(context.Background()) _, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC)) @@ -1600,6 +1613,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) { 0, false, false, + false, nil, false, ) @@ -1607,7 +1621,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) { fakeRef := storage.SeriesRef(1) expValue := float64(1) metric := []byte(`metric{n="1"} 1`) - p, warning := textparse.New(metric, "") + p, warning := textparse.New(metric, "", false) require.NoError(t, warning) var lset labels.Labels @@ -1658,6 +1672,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) { 0, false, false, + false, nil, false, ) @@ -1735,6 +1750,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) { 0, false, false, + false, nil, false, ) @@ -1833,6 +1849,7 @@ func TestScrapeLoop_ChangingMetricString(t 
*testing.T) { 0, false, false, + false, nil, false, ) @@ -1881,6 +1898,7 @@ func TestScrapeLoopAppendStaleness(t *testing.T) { 0, false, false, + false, nil, false, ) @@ -1932,6 +1950,7 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) { 0, false, false, + false, nil, false, ) @@ -2043,6 +2062,7 @@ metric_total{n="2"} 2 # {t="2"} 2.0 20000 0, false, false, + false, nil, false, ) @@ -2108,6 +2128,7 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) { 0, false, false, + false, nil, false, ) @@ -2160,6 +2181,7 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) { time.Hour, false, false, + false, nil, false, ) @@ -2196,6 +2218,7 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) { time.Hour, false, false, + false, nil, false, ) @@ -2245,6 +2268,7 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T 0, false, false, + false, nil, false, ) @@ -2290,6 +2314,7 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) { 0, false, false, + false, nil, false, ) @@ -2562,6 +2587,7 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) { 0, false, false, + false, nil, false, ) @@ -2603,6 +2629,7 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) { 0, false, false, + false, nil, false, ) @@ -2643,6 +2670,7 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) { 0, false, false, + false, nil, false, ) @@ -2701,6 +2729,7 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) { 0, false, false, + false, nil, false, ) @@ -2964,6 +2993,7 @@ func TestScrapeAddFast(t *testing.T) { 0, false, false, + false, nil, false, ) @@ -3050,6 +3080,7 @@ func TestScrapeReportSingleAppender(t *testing.T) { time.Hour, false, false, + false, nil, false, ) @@ -3252,6 +3283,7 @@ func TestScrapeLoopLabelLimit(t *testing.T) { 0, false, false, + false, nil, false, ) diff --git a/web/federate_test.go b/web/federate_test.go index 61ef62f46..bf7b6fefe 100644 --- a/web/federate_test.go +++ 
b/web/federate_test.go @@ -378,7 +378,7 @@ func TestFederationWithNativeHistograms(t *testing.T) { body, err := io.ReadAll(res.Body) require.NoError(t, err) - p := textparse.NewProtobufParser(body) + p := textparse.NewProtobufParser(body, false) var actVec promql.Vector metricFamilies := 0 l := labels.Labels{} From 1ac5131f698ebc60f13fe2727f89b115a41f6558 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sun, 14 May 2023 07:13:03 +0100 Subject: [PATCH 139/632] Release 2.44.0 (#12364) Signed-off-by: Bryan Boreham --- CHANGELOG.md | 11 ++--------- VERSION | 2 +- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/react-app/package.json | 4 ++-- 6 files changed, 15 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ebc602800..f7dc32273 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,14 +1,6 @@ # Changelog -## 2.44.0-rc.2 / 2023-05-07 - -* [ENHANCEMENT] Storage: Optimise buffer used to iterate through samples. #12326 - -## 2.44.0-rc.1 / 2023-05-03 - -* [BUGFIX] Labels: Set after Del would be ignored, which broke some relabeling rules. #12322 - -## 2.44.0-rc.0 / 2023-04-22 +## 2.44.0 / 2023-05-13 This version is built with Go tag `stringlabels`, to use the smaller data structure for Labels that was optional in the previous release. For more @@ -18,6 +10,7 @@ details about this code change see #10991. * [FEATURE] Remote-read: Handle native histograms. #12085, #12192 * [FEATURE] Promtool: Health and readiness check of prometheus server in CLI. #12096 * [FEATURE] PromQL: Add `query_samples_total` metric, the total number of samples loaded by all queries. #12251 +* [ENHANCEMENT] Storage: Optimise buffer used to iterate through samples. #12326 * [ENHANCEMENT] Scrape: Reduce memory allocations on target labels. #12084 * [ENHANCEMENT] PromQL: Use faster heap method for `topk()` / `bottomk()`. 
#12190 * [ENHANCEMENT] Rules API: Allow filtering by rule name. #12270 diff --git a/VERSION b/VERSION index 3f6719d55..3e197472e 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.44.0-rc.2 +2.44.0 diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 048b1aab4..e819319ee 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.44.0-rc.2", + "version": "0.44.0", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.44.0-rc.2", + "@prometheus-io/lezer-promql": "0.44.0", "lru-cache": "^6.0.0" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 622e9bea4..7daa56738 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.44.0-rc.2", + "version": "0.44.0", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 0cd9eb729..1d3d1d3e3 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -28,10 +28,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.44.0-rc.2", + "version": "0.44.0", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.44.0-rc.2", + "@prometheus-io/lezer-promql": "0.44.0", "lru-cache": "^6.0.0" }, "devDependencies": { @@ -61,7 +61,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.44.0-rc.2", + "version": 
"0.44.0", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.2.2", @@ -20763,7 +20763,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.44.0-rc.2", + "version": "0.44.0", "dependencies": { "@codemirror/autocomplete": "^6.4.0", "@codemirror/commands": "^6.2.0", @@ -20781,7 +20781,7 @@ "@lezer/lr": "^1.3.1", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.44.0-rc.2", + "@prometheus-io/codemirror-promql": "0.44.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.2.0", @@ -23417,7 +23417,7 @@ "@lezer/lr": "^1.3.1", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.44.0-rc.2", + "@prometheus-io/codemirror-promql": "0.44.0", "@testing-library/react-hooks": "^7.0.2", "@types/enzyme": "^3.10.12", "@types/flot": "0.0.32", @@ -23468,7 +23468,7 @@ "@lezer/common": "^1.0.2", "@lezer/highlight": "^1.1.3", "@lezer/lr": "^1.3.1", - "@prometheus-io/lezer-promql": "0.44.0-rc.2", + "@prometheus-io/lezer-promql": "0.44.0", "@types/lru-cache": "^5.1.1", "isomorphic-fetch": "^3.0.0", "lru-cache": "^6.0.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 1e0c4ed15..fc0d1e5fc 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.44.0-rc.2", + "version": "0.44.0", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.4.0", @@ -19,7 +19,7 @@ "@lezer/common": "^1.0.2", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.44.0-rc.2", + "@prometheus-io/codemirror-promql": "0.44.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.2.0", From c6618729c9054e00c126e03338f670109d8de7af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Thu, 11 May 2023 21:02:02 +0200 Subject: [PATCH 140/632] Fix HistogramAppender.Appendable 
array out of bound error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The code did not handle spans with 0 length properly. Spans with length zero are now skipped in the comparison. Span index check not done against length-1, since length is a unit32, thus subtracting 1 leads to 2^32, not -1. Fixes and unit tests for both integer and float histograms added. Signed-off-by: György Krajcsovits --- tsdb/chunkenc/float_histogram.go | 8 ++-- tsdb/chunkenc/float_histogram_test.go | 58 ++++++++++++++++++++++++ tsdb/chunkenc/histogram.go | 8 ++-- tsdb/chunkenc/histogram_meta.go | 7 +++ tsdb/chunkenc/histogram_test.go | 63 +++++++++++++++++++++++++++ 5 files changed, 136 insertions(+), 8 deletions(-) diff --git a/tsdb/chunkenc/float_histogram.go b/tsdb/chunkenc/float_histogram.go index 0349de9ab..d49885a1c 100644 --- a/tsdb/chunkenc/float_histogram.go +++ b/tsdb/chunkenc/float_histogram.go @@ -358,9 +358,9 @@ func counterResetInAnyFloatBucket(oldBuckets []xorValue, newBuckets []float64, o if oldIdx <= newIdx { // Moving ahead old bucket and span by 1 index. - if oldInsideSpanIdx == oldSpans[oldSpanSliceIdx].Length-1 { + if oldInsideSpanIdx+1 >= oldSpans[oldSpanSliceIdx].Length { // Current span is over. - oldSpanSliceIdx++ + oldSpanSliceIdx = nextNonEmptySpanSliceIdx(oldSpanSliceIdx, oldSpans) oldInsideSpanIdx = 0 if oldSpanSliceIdx >= len(oldSpans) { // All old spans are over. @@ -377,9 +377,9 @@ func counterResetInAnyFloatBucket(oldBuckets []xorValue, newBuckets []float64, o if oldIdx > newIdx { // Moving ahead new bucket and span by 1 index. - if newInsideSpanIdx == newSpans[newSpanSliceIdx].Length-1 { + if newInsideSpanIdx+1 >= newSpans[newSpanSliceIdx].Length { // Current span is over. - newSpanSliceIdx++ + newSpanSliceIdx = nextNonEmptySpanSliceIdx(newSpanSliceIdx, newSpans) newInsideSpanIdx = 0 if newSpanSliceIdx >= len(newSpans) { // All new spans are over. 
diff --git a/tsdb/chunkenc/float_histogram_test.go b/tsdb/chunkenc/float_histogram_test.go index 90c16d1ea..c662e5ffa 100644 --- a/tsdb/chunkenc/float_histogram_test.go +++ b/tsdb/chunkenc/float_histogram_test.go @@ -365,6 +365,64 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { } } +func TestFloatHistogramChunkAppendableWithEmptySpan(t *testing.T) { + h1 := &histogram.FloatHistogram{ + Schema: 0, + Count: 21, + Sum: 1234.5, + ZeroThreshold: 0.001, + ZeroCount: 4, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []float64{1, 2, 1, 1, 1, 1, 1}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 2, Length: 3}, + }, + NegativeBuckets: []float64{1, 2, 1, 2, 2, 2, 2}, + } + h2 := &histogram.FloatHistogram{ + Schema: 0, + Count: 37, + Sum: 2345.6, + ZeroThreshold: 0.001, + ZeroCount: 5, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []float64{1, 3, 1, 2, 1, 1, 1}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 2, Length: 3}, + }, + NegativeBuckets: []float64{1, 4, 2, 7, 5, 5, 2}, + } + + c := Chunk(NewFloatHistogramChunk()) + + // Create fresh appender and add the first histogram. 
+ app, err := c.Appender() + require.NoError(t, err) + require.Equal(t, 0, c.NumSamples()) + + app.AppendFloatHistogram(1, h1) + require.Equal(t, 1, c.NumSamples()) + hApp, _ := app.(*FloatHistogramAppender) + + pI, nI, okToAppend, counterReset := hApp.Appendable(h2) + require.Empty(t, pI) + require.Empty(t, nI) + require.True(t, okToAppend) + require.False(t, counterReset) +} + func TestFloatHistogramChunkAppendableGauge(t *testing.T) { c := Chunk(NewFloatHistogramChunk()) diff --git a/tsdb/chunkenc/histogram.go b/tsdb/chunkenc/histogram.go index f9a63d18f..2350b2af2 100644 --- a/tsdb/chunkenc/histogram.go +++ b/tsdb/chunkenc/histogram.go @@ -386,9 +386,9 @@ func counterResetInAnyBucket(oldBuckets, newBuckets []int64, oldSpans, newSpans if oldIdx <= newIdx { // Moving ahead old bucket and span by 1 index. - if oldInsideSpanIdx == oldSpans[oldSpanSliceIdx].Length-1 { + if oldInsideSpanIdx+1 >= oldSpans[oldSpanSliceIdx].Length { // Current span is over. - oldSpanSliceIdx++ + oldSpanSliceIdx = nextNonEmptySpanSliceIdx(oldSpanSliceIdx, oldSpans) oldInsideSpanIdx = 0 if oldSpanSliceIdx >= len(oldSpans) { // All old spans are over. @@ -405,9 +405,9 @@ func counterResetInAnyBucket(oldBuckets, newBuckets []int64, oldSpans, newSpans if oldIdx > newIdx { // Moving ahead new bucket and span by 1 index. - if newInsideSpanIdx == newSpans[newSpanSliceIdx].Length-1 { + if newInsideSpanIdx+1 >= newSpans[newSpanSliceIdx].Length { // Current span is over. - newSpanSliceIdx++ + newSpanSliceIdx = nextNonEmptySpanSliceIdx(newSpanSliceIdx, newSpans) newInsideSpanIdx = 0 if newSpanSliceIdx >= len(newSpans) { // All new spans are over. 
diff --git a/tsdb/chunkenc/histogram_meta.go b/tsdb/chunkenc/histogram_meta.go index 027eee112..7a21bc20b 100644 --- a/tsdb/chunkenc/histogram_meta.go +++ b/tsdb/chunkenc/histogram_meta.go @@ -487,3 +487,10 @@ func counterResetHint(crh CounterResetHeader, numRead uint16) histogram.CounterR return histogram.UnknownCounterReset } } + +// Handle pathological case of empty span when advancing span idx. +func nextNonEmptySpanSliceIdx(idx int, spans []histogram.Span) (newIdx int) { + for idx++; idx < len(spans) && spans[idx].Length == 0; idx++ { + } + return idx +} diff --git a/tsdb/chunkenc/histogram_test.go b/tsdb/chunkenc/histogram_test.go index 45f31a3b4..9ef7e24a4 100644 --- a/tsdb/chunkenc/histogram_test.go +++ b/tsdb/chunkenc/histogram_test.go @@ -14,6 +14,7 @@ package chunkenc import ( + "fmt" "testing" "github.com/stretchr/testify/require" @@ -387,6 +388,64 @@ func TestHistogramChunkAppendable(t *testing.T) { } } +func TestHistogramChunkAppendableWithEmptySpan(t *testing.T) { + h1 := &histogram.Histogram{ + Schema: 0, + Count: 21, + Sum: 1234.5, + ZeroThreshold: 0.001, + ZeroCount: 4, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 1, -1, 0, 0, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 2, Length: 3}, + }, + NegativeBuckets: []int64{1, 1, -1, 1, 0, 0, 0}, + } + h2 := &histogram.Histogram{ + Schema: 0, + Count: 37, + Sum: 2345.6, + ZeroThreshold: 0.001, + ZeroCount: 5, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 2, Length: 3}, + }, + NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, + } + + c := Chunk(NewHistogramChunk()) + + // Create fresh appender and add the first histogram. 
+ app, err := c.Appender() + require.NoError(t, err) + require.Equal(t, 0, c.NumSamples()) + + app.AppendHistogram(1, h1) + require.Equal(t, 1, c.NumSamples()) + hApp, _ := app.(*HistogramAppender) + + pI, nI, okToAppend, counterReset := hApp.Appendable(h2) + require.Empty(t, pI) + require.Empty(t, nI) + require.True(t, okToAppend) + require.False(t, counterReset) +} + func TestAtFloatHistogram(t *testing.T) { input := []histogram.Histogram{ { @@ -514,6 +573,10 @@ func TestAtFloatHistogram(t *testing.T) { app, err := chk.Appender() require.NoError(t, err) for i := range input { + if i > 0 { + _, _, okToAppend, _ := app.(*HistogramAppender).Appendable(&input[i]) + require.True(t, okToAppend, fmt.Sprintf("idx: %d", i)) + } app.AppendHistogram(int64(i), &input[i]) } it := chk.Iterator(nil) From 4c898fc4a1840d76ec4ffa1307e4aaf4f50dd7b3 Mon Sep 17 00:00:00 2001 From: Akshay Siwal Date: Mon, 15 May 2023 13:18:30 +0530 Subject: [PATCH 141/632] Update getting_started.md Updating signal for graceful shutdown. Signed-off-by: Akshay Siwal --- docs/getting_started.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/getting_started.md b/docs/getting_started.md index 11d8d0fb8..e89ac705e 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -264,4 +264,4 @@ process ID. While Prometheus does have recovery mechanisms in the case that there is an abrupt process failure it is recommend to use the `SIGTERM` signal to cleanly shutdown a Prometheus instance. If you're running on Linux this can be performed -by using `kill -s SIGHUP `, replacing `` with your Prometheus process ID. +by using `kill -s SIGTERM `, replacing `` with your Prometheus process ID. From c3f267d862cf91b6d55a5f2d218c5c6c0bd07ce3 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 22 Mar 2023 12:11:49 +0000 Subject: [PATCH 142/632] Alerts: more efficient generation of target labels Use a label builder instead of a slice when creating labels for the target alertmanagers. 
This can be passed directly to `relabel.ProcessBuilder`, skipping a copy. Signed-off-by: Bryan Boreham --- notifier/notifier.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/notifier/notifier.go b/notifier/notifier.go index c3b2e5c7e..d6705edd3 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -701,36 +701,38 @@ func postPath(pre string, v config.AlertmanagerAPIVersion) string { func AlertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig) ([]alertmanager, []alertmanager, error) { var res []alertmanager var droppedAlertManagers []alertmanager + lb := labels.NewBuilder(labels.EmptyLabels()) for _, tlset := range tg.Targets { - lbls := make([]labels.Label, 0, len(tlset)+2+len(tg.Labels)) + lb.Reset(labels.EmptyLabels()) for ln, lv := range tlset { - lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)}) + lb.Set(string(ln), string(lv)) } // Set configured scheme as the initial scheme label for overwrite. - lbls = append(lbls, labels.Label{Name: model.SchemeLabel, Value: cfg.Scheme}) - lbls = append(lbls, labels.Label{Name: pathLabel, Value: postPath(cfg.PathPrefix, cfg.APIVersion)}) + lb.Set(model.SchemeLabel, cfg.Scheme) + lb.Set(pathLabel, postPath(cfg.PathPrefix, cfg.APIVersion)) // Combine target labels with target group labels. for ln, lv := range tg.Labels { if _, ok := tlset[ln]; !ok { - lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)}) + lb.Set(string(ln), string(lv)) } } - lset, keep := relabel.Process(labels.New(lbls...), cfg.RelabelConfigs...) + preRelabel := lb.Labels() + keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...) 
if !keep { - droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{labels.New(lbls...)}) + droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{preRelabel}) continue } - addr := lset.Get(model.AddressLabel) + addr := lb.Get(model.AddressLabel) if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { return nil, nil, err } - res = append(res, alertmanagerLabels{lset}) + res = append(res, alertmanagerLabels{lb.Labels()}) } return res, droppedAlertManagers, nil } From 3711339a7d4f787b136a2ef1b282dd9b9ea161b9 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 22 Mar 2023 12:26:17 +0000 Subject: [PATCH 143/632] Alerts: more efficient relabel on Send Re-use `labels.Builder` and use `relabel.ProcessBuilder` to skip a conversion step. Signed-off-by: Bryan Boreham --- notifier/notifier.go | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/notifier/notifier.go b/notifier/notifier.go index d6705edd3..891372c43 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -349,19 +349,6 @@ func (n *Manager) Send(alerts ...*Alert) { n.mtx.Lock() defer n.mtx.Unlock() - // Attach external labels before relabelling and sending. - for _, a := range alerts { - lb := labels.NewBuilder(a.Labels) - - n.opts.ExternalLabels.Range(func(l labels.Label) { - if a.Labels.Get(l.Name) == "" { - lb.Set(l.Name, l.Value) - } - }) - - a.Labels = lb.Labels() - } - alerts = n.relabelAlerts(alerts) if len(alerts) == 0 { return @@ -390,15 +377,25 @@ func (n *Manager) Send(alerts ...*Alert) { n.setMore() } +// Attach external labels and process relabelling rules. func (n *Manager) relabelAlerts(alerts []*Alert) []*Alert { + lb := labels.NewBuilder(labels.EmptyLabels()) var relabeledAlerts []*Alert - for _, alert := range alerts { - labels, keep := relabel.Process(alert.Labels, n.opts.RelabelConfigs...) 
- if keep { - alert.Labels = labels - relabeledAlerts = append(relabeledAlerts, alert) + for _, a := range alerts { + lb.Reset(a.Labels) + n.opts.ExternalLabels.Range(func(l labels.Label) { + if a.Labels.Get(l.Name) == "" { + lb.Set(l.Name, l.Value) + } + }) + + keep := relabel.ProcessBuilder(lb, n.opts.RelabelConfigs...) + if !keep { + continue } + a.Labels = lb.Labels() + relabeledAlerts = append(relabeledAlerts, a) } return relabeledAlerts } From 0d2108ad7969ed4b6dbd9b76bc812e20d456332f Mon Sep 17 00:00:00 2001 From: Callum Styan Date: Mon, 15 May 2023 12:31:49 -0700 Subject: [PATCH 144/632] [tsdb] re-implement WAL watcher to read via a "notification" channel (#11949) * WIP implement WAL watcher reading via notifications over a channel from the TSDB code Signed-off-by: Callum Styan * Notify via head appenders Commit (finished all WAL logging) rather than on each WAL Log call Signed-off-by: Callum Styan * Fix misspelled Notify plus add a metric for dropped Write notifications Signed-off-by: Callum Styan * Update tests to handle new notification pattern Signed-off-by: Callum Styan * this test maybe needs more time on windows? Signed-off-by: Callum Styan * does this test need more time on windows as well? 
Signed-off-by: Callum Styan * read timeout is already a time.Duration Signed-off-by: Callum Styan * remove mistakenly commited benchmark data files Signed-off-by: Callum Styan * address some review feedback Signed-off-by: Callum Styan * fix missed changes from previous commit Signed-off-by: Callum Styan * Fix issues from wrapper function Signed-off-by: Callum Styan * try fixing race condition in test by allowing tests to overwrite the read ticker timeout instead of calling the Notify function Signed-off-by: Callum Styan * fix linting Signed-off-by: Callum Styan --------- Signed-off-by: Callum Styan --- cmd/prometheus/main.go | 1 + storage/remote/storage.go | 7 +++ tsdb/db.go | 9 ++++ tsdb/head.go | 2 + tsdb/head_append.go | 4 ++ tsdb/wlog/watcher.go | 100 +++++++++++++++++++++++++++++--------- tsdb/wlog/watcher_test.go | 39 ++++++++------- tsdb/wlog/wlog.go | 6 +++ 8 files changed, 128 insertions(+), 40 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index c883ab47e..778b131c8 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -1053,6 +1053,7 @@ func main() { startTimeMargin := int64(2 * time.Duration(cfg.tsdb.MinBlockDuration).Seconds() * 1000) localStorage.Set(db, startTimeMargin) + db.SetWriteNotified(remoteStorage) close(dbOpen) <-cancel return nil diff --git a/storage/remote/storage.go b/storage/remote/storage.go index 88892e62e..d01f96b3b 100644 --- a/storage/remote/storage.go +++ b/storage/remote/storage.go @@ -76,6 +76,13 @@ func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCal return s } +func (s *Storage) Notify() { + for _, q := range s.rws.queues { + // These should all be non blocking + q.watcher.Notify() + } +} + // ApplyConfig updates the state as the new config requires. 
func (s *Storage) ApplyConfig(conf *config.Config) error { s.mtx.Lock() diff --git a/tsdb/db.go b/tsdb/db.go index 0cd00c2c5..a0d0a4b26 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -229,6 +229,8 @@ type DB struct { // out-of-order compaction and vertical queries. oooWasEnabled atomic.Bool + writeNotified wlog.WriteNotified + registerer prometheus.Registerer } @@ -802,6 +804,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs if err != nil { return nil, err } + db.head.writeNotified = db.writeNotified // Register metrics after assigning the head block. db.metrics = newDBMetrics(db, r) @@ -2016,6 +2019,12 @@ func (db *DB) CleanTombstones() (err error) { return nil } +func (db *DB) SetWriteNotified(wn wlog.WriteNotified) { + db.writeNotified = wn + // It's possible we already created the head struct, so we should also set the WN for that. + db.head.writeNotified = wn +} + func isBlockDir(fi fs.DirEntry) bool { if !fi.IsDir() { return false diff --git a/tsdb/head.go b/tsdb/head.go index a3c135300..aa7a3c125 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -121,6 +121,8 @@ type Head struct { stats *HeadStats reg prometheus.Registerer + writeNotified wlog.WriteNotified + memTruncationInProcess atomic.Bool } diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 2af01a2d6..060d32b7f 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -842,6 +842,10 @@ func (a *headAppender) Commit() (err error) { return errors.Wrap(err, "write to WAL") } + if a.head.writeNotified != nil { + a.head.writeNotified.Notify() + } + // No errors logging to WAL, so pass the exemplars along to the in memory storage. 
for _, e := range a.exemplars { s := a.head.series.getByID(chunks.HeadSeriesRef(e.ref)) diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go index 31f60feab..221e9607c 100644 --- a/tsdb/wlog/watcher.go +++ b/tsdb/wlog/watcher.go @@ -34,12 +34,16 @@ import ( ) const ( - readPeriod = 10 * time.Millisecond checkpointPeriod = 5 * time.Second segmentCheckPeriod = 100 * time.Millisecond consumer = "consumer" ) +var ( + ErrIgnorable = errors.New("ignore me") + readTimeout = 15 * time.Second +) + // WriteTo is an interface used by the Watcher to send the samples it's read // from the WAL on to somewhere else. Functions will be called concurrently // and it is left to the implementer to make sure they are safe. @@ -61,11 +65,17 @@ type WriteTo interface { SeriesReset(int) } +// Used to notifier the watcher that data has been written so that it can read. +type WriteNotified interface { + Notify() +} + type WatcherMetrics struct { recordsRead *prometheus.CounterVec recordDecodeFails *prometheus.CounterVec samplesSentPreTailing *prometheus.CounterVec currentSegment *prometheus.GaugeVec + notificationsSkipped *prometheus.CounterVec } // Watcher watches the TSDB WAL for a given WriteTo. @@ -88,9 +98,11 @@ type Watcher struct { recordDecodeFailsMetric prometheus.Counter samplesSentPreTailing prometheus.Counter currentSegmentMetric prometheus.Gauge + notificationsSkipped prometheus.Counter - quit chan struct{} - done chan struct{} + readNotify chan struct{} + quit chan struct{} + done chan struct{} // For testing, stop when we hit this segment. 
MaxSegment int @@ -134,6 +146,15 @@ func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics { }, []string{consumer}, ), + notificationsSkipped: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "prometheus", + Subsystem: "wal_watcher", + Name: "notifications_skipped_total", + Help: "The number of WAL write notifications that the Watcher has skipped due to already being in a WAL read routine.", + }, + []string{consumer}, + ), } if reg != nil { @@ -141,6 +162,7 @@ func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics { reg.MustRegister(m.recordDecodeFails) reg.MustRegister(m.samplesSentPreTailing) reg.MustRegister(m.currentSegment) + reg.MustRegister(m.notificationsSkipped) } return m @@ -161,13 +183,25 @@ func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logge sendExemplars: sendExemplars, sendHistograms: sendHistograms, - quit: make(chan struct{}), - done: make(chan struct{}), + readNotify: make(chan struct{}), + quit: make(chan struct{}), + done: make(chan struct{}), MaxSegment: -1, } } +func (w *Watcher) Notify() { + select { + case w.readNotify <- struct{}{}: + return + default: // default so we can exit + // we don't need a buffered channel or any buffering since + // for each notification it recv's the watcher will read until EOF + w.notificationsSkipped.Inc() + } +} + func (w *Watcher) setMetrics() { // Setup the WAL Watchers metrics. 
We do this here rather than in the // constructor because of the ordering of creating Queue Managers's, @@ -177,6 +211,8 @@ func (w *Watcher) setMetrics() { w.recordDecodeFailsMetric = w.metrics.recordDecodeFails.WithLabelValues(w.name) w.samplesSentPreTailing = w.metrics.samplesSentPreTailing.WithLabelValues(w.name) w.currentSegmentMetric = w.metrics.currentSegment.WithLabelValues(w.name) + w.notificationsSkipped = w.metrics.notificationsSkipped.WithLabelValues(w.name) + } } @@ -262,7 +298,7 @@ func (w *Watcher) Run() error { // On start, after reading the existing WAL for series records, we have a pointer to what is the latest segment. // On subsequent calls to this function, currentSegment will have been incremented and we should open that segment. - if err := w.watch(currentSegment, currentSegment >= lastSegment); err != nil { + if err := w.watch(currentSegment, currentSegment >= lastSegment); err != nil && !errors.Is(err, ErrIgnorable) { return err } @@ -330,6 +366,26 @@ func (w *Watcher) segments(dir string) ([]int, error) { return refs, nil } +func (w *Watcher) readAndHandleError(r *LiveReader, segmentNum int, tail bool, size int64) error { + err := w.readSegment(r, segmentNum, tail) + + // Ignore all errors reading to end of segment whilst replaying the WAL. + if !tail { + if err != nil && errors.Cause(err) != io.EOF { + level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err) + } else if r.Offset() != size { + level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", r.Offset(), "size", size) + } + return ErrIgnorable + } + + // Otherwise, when we are tailing, non-EOFs are fatal. + if errors.Cause(err) != io.EOF { + return err + } + return nil +} + // Use tail true to indicate that the reader is currently on a segment that is // actively being written to. 
If false, assume it's a full segment and we're // replaying it on start to cache the series records. @@ -342,7 +398,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { reader := NewLiveReader(w.logger, w.readerMetrics, segment) - readTicker := time.NewTicker(readPeriod) + readTicker := time.NewTicker(readTimeout) defer readTicker.Stop() checkpointTicker := time.NewTicker(checkpointPeriod) @@ -400,7 +456,6 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { if last <= segmentNum { continue } - err = w.readSegment(reader, segmentNum, tail) // Ignore errors reading to end of segment whilst replaying the WAL. @@ -421,24 +476,23 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { return nil + // we haven't read due to a notification in quite some time, try reading anyways case <-readTicker.C: - err = w.readSegment(reader, segmentNum, tail) - - // Ignore all errors reading to end of segment whilst replaying the WAL. - if !tail { - switch { - case err != nil && errors.Cause(err) != io.EOF: - level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err) - case reader.Offset() != size: - level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size) - } - return nil - } - - // Otherwise, when we are tailing, non-EOFs are fatal. 
- if errors.Cause(err) != io.EOF { + level.Debug(w.logger).Log("msg", "Watcher is reading the WAL due to timeout, haven't received any write notifications recently", "timeout", readTimeout) + err := w.readAndHandleError(reader, segmentNum, tail, size) + if err != nil { return err } + // still want to reset the ticker so we don't read too often + readTicker.Reset(readTimeout) + + case <-w.readNotify: + err := w.readAndHandleError(reader, segmentNum, tail, size) + if err != nil { + return err + } + // still want to reset the ticker so we don't read too often + readTicker.Reset(readTimeout) } } } diff --git a/tsdb/wlog/watcher_test.go b/tsdb/wlog/watcher_test.go index 530d0ffb4..94b6a92d1 100644 --- a/tsdb/wlog/watcher_test.go +++ b/tsdb/wlog/watcher_test.go @@ -104,7 +104,7 @@ func (wtm *writeToMock) SeriesReset(index int) { } } -func (wtm *writeToMock) checkNumLabels() int { +func (wtm *writeToMock) checkNumSeries() int { wtm.seriesLock.Lock() defer wtm.seriesLock.Unlock() return len(wtm.seriesSegmentIndexes) @@ -230,9 +230,9 @@ func TestTailSamples(t *testing.T) { expectedExemplars := seriesCount * exemplarsCount expectedHistograms := seriesCount * histogramsCount retry(t, defaultRetryInterval, defaultRetries, func() bool { - return wt.checkNumLabels() >= expectedSeries + return wt.checkNumSeries() >= expectedSeries }) - require.Equal(t, expectedSeries, wt.checkNumLabels(), "did not receive the expected number of series") + require.Equal(t, expectedSeries, wt.checkNumSeries(), "did not receive the expected number of series") require.Equal(t, expectedSamples, wt.samplesAppended, "did not receive the expected number of samples") require.Equal(t, expectedExemplars, wt.exemplarsAppended, "did not receive the expected number of exemplars") require.Equal(t, expectedHistograms, wt.histogramsAppended, "did not receive the expected number of histograms") @@ -290,7 +290,7 @@ func TestReadToEndNoCheckpoint(t *testing.T) { } } require.NoError(t, w.Log(recs...)) - + readTimeout 
= time.Second _, _, err = Segments(w.Dir()) require.NoError(t, err) @@ -299,11 +299,10 @@ func TestReadToEndNoCheckpoint(t *testing.T) { go watcher.Start() expected := seriesCount - retry(t, defaultRetryInterval, defaultRetries, func() bool { - return wt.checkNumLabels() >= expected - }) + require.Eventually(t, func() bool { + return wt.checkNumSeries() == expected + }, 20*time.Second, 1*time.Second) watcher.Stop() - require.Equal(t, expected, wt.checkNumLabels()) }) } } @@ -383,16 +382,17 @@ func TestReadToEndWithCheckpoint(t *testing.T) { _, _, err = Segments(w.Dir()) require.NoError(t, err) + readTimeout = time.Second wt := newWriteToMock() watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false) go watcher.Start() expected := seriesCount * 2 - retry(t, defaultRetryInterval, defaultRetries, func() bool { - return wt.checkNumLabels() >= expected - }) + + require.Eventually(t, func() bool { + return wt.checkNumSeries() == expected + }, 10*time.Second, 1*time.Second) watcher.Stop() - require.Equal(t, expected, wt.checkNumLabels()) }) } } @@ -460,10 +460,10 @@ func TestReadCheckpoint(t *testing.T) { expectedSeries := seriesCount retry(t, defaultRetryInterval, defaultRetries, func() bool { - return wt.checkNumLabels() >= expectedSeries + return wt.checkNumSeries() >= expectedSeries }) watcher.Stop() - require.Equal(t, expectedSeries, wt.checkNumLabels()) + require.Equal(t, expectedSeries, wt.checkNumSeries()) }) } } @@ -595,6 +595,7 @@ func TestCheckpointSeriesReset(t *testing.T) { _, _, err = Segments(w.Dir()) require.NoError(t, err) + readTimeout = time.Second wt := newWriteToMock() watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false) watcher.MaxSegment = -1 @@ -602,9 +603,11 @@ func TestCheckpointSeriesReset(t *testing.T) { expected := seriesCount retry(t, defaultRetryInterval, defaultRetries, func() bool { - return wt.checkNumLabels() >= expected + return wt.checkNumSeries() >= expected }) - require.Equal(t, seriesCount, 
wt.checkNumLabels()) + require.Eventually(t, func() bool { + return wt.checkNumSeries() == seriesCount + }, 10*time.Second, 1*time.Second) _, err = Checkpoint(log.NewNopLogger(), w, 2, 4, func(x chunks.HeadSeriesRef) bool { return true }, 0) require.NoError(t, err) @@ -621,7 +624,9 @@ func TestCheckpointSeriesReset(t *testing.T) { // If you modify the checkpoint and truncate segment #'s run the test to see how // many series records you end up with and change the last Equals check accordingly // or modify the Equals to Assert(len(wt.seriesLabels) < seriesCount*10) - require.Equal(t, tc.segments, wt.checkNumLabels()) + require.Eventually(t, func() bool { + return wt.checkNumSeries() == tc.segments + }, 20*time.Second, 1*time.Second) }) } } diff --git a/tsdb/wlog/wlog.go b/tsdb/wlog/wlog.go index df8bab53f..e38cb94cb 100644 --- a/tsdb/wlog/wlog.go +++ b/tsdb/wlog/wlog.go @@ -188,6 +188,8 @@ type WL struct { compress bool snappyBuf []byte + WriteNotified WriteNotified + metrics *wlMetrics } @@ -343,6 +345,10 @@ func (w *WL) Dir() string { return w.dir } +func (w *WL) SetWriteNotified(wn WriteNotified) { + w.WriteNotified = wn +} + func (w *WL) run() { Loop: for { From 191bf9055bb51093c5f13d2ccef5a18ec4786260 Mon Sep 17 00:00:00 2001 From: zenador Date: Wed, 17 May 2023 03:15:20 +0800 Subject: [PATCH 145/632] Handle more arithmetic operators for native histograms (#12262) Handle more arithmetic operators and aggregators for native histograms This includes operators for multiplication (formerly known as scaling), division, and subtraction. Plus aggregations for average and the avg_over_time function. Stdvar and stddev will (for now) ignore histograms properly (rather than counting them but adding a 0 for them). 
Signed-off-by: Jeanette Tan --- docs/querying/functions.md | 5 +- docs/querying/operators.md | 26 +- model/histogram/float_histogram.go | 19 +- model/histogram/float_histogram_test.go | 149 ++++++- promql/engine.go | 145 +++++-- promql/engine_test.go | 499 ++++++++++++++++++++++-- promql/functions.go | 60 ++- 7 files changed, 803 insertions(+), 100 deletions(-) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index e92a58937..c8831c079 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -589,8 +589,9 @@ over time and return an instant vector with per-series aggregation results: Note that all values in the specified interval have the same weight in the aggregation even if the values are not equally spaced throughout the interval. -`count_over_time`, `last_over_time`, and `present_over_time` handle native -histograms as expected. All other functions ignore histogram samples. +`avg_over_time`, `sum_over_time`, `count_over_time`, `last_over_time`, and +`present_over_time` handle native histograms as expected. All other functions +ignore histogram samples. ## Trigonometric Functions diff --git a/docs/querying/operators.md b/docs/querying/operators.md index 0cd536894..c691a0f1a 100644 --- a/docs/querying/operators.md +++ b/docs/querying/operators.md @@ -318,19 +318,23 @@ histograms is still very limited. Logical/set binary operators work as expected even if histogram samples are involved. They only check for the existence of a vector element and don't change their behavior depending on the sample type of an element (float or -histogram). +histogram). The `count` aggregation operator works similarly. -The binary `+` operator between two native histograms and the `sum` aggregation -operator to aggregate native histograms are fully supported. 
Even if the -histograms involved have different bucket layouts, the buckets are -automatically converted appropriately so that the operation can be +The binary `+` and `-` operators between two native histograms and the `sum` +and `avg` aggregation operators to aggregate native histograms are fully +supported. Even if the histograms involved have different bucket layouts, the +buckets are automatically converted appropriately so that the operation can be performed. (With the currently supported bucket schemas, that's always -possible.) If either operator has to sum up a mix of histogram samples and +possible.) If either operator has to aggregate a mix of histogram samples and float samples, the corresponding vector element is removed from the output vector entirely. -All other operators do not behave in a meaningful way. They either treat the -histogram sample as if it were a float sample of value 0, or (in case of -arithmetic operations between a scalar and a vector) they leave the histogram -sample unchanged. This behavior will change to a meaningful one before native -histograms are a stable feature. +The binary `*` operator works between a native histogram and a float in any +order, while the binary `/` operator can be used between a native histogram +and a float in that exact order. + +All other operators (and unmentioned cases for the above operators) do not +behave in a meaningful way. They either treat the histogram sample as if it +were a float sample of value 0, or (in case of arithmetic operations between a +scalar and a vector) they leave the histogram sample unchanged. This behavior +will change to a meaningful one before native histograms are a stable feature. 
diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index f95f0051c..f4ee13fac 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -159,12 +159,12 @@ func (h *FloatHistogram) ZeroBucket() Bucket[float64] { } } -// Scale scales the FloatHistogram by the provided factor, i.e. it scales all +// Mul multiplies the FloatHistogram by the provided factor, i.e. it scales all // bucket counts including the zero bucket and the count and the sum of // observations. The bucket layout stays the same. This method changes the // receiving histogram directly (rather than acting on a copy). It returns a // pointer to the receiving histogram for convenience. -func (h *FloatHistogram) Scale(factor float64) *FloatHistogram { +func (h *FloatHistogram) Mul(factor float64) *FloatHistogram { h.ZeroCount *= factor h.Count *= factor h.Sum *= factor @@ -177,6 +177,21 @@ func (h *FloatHistogram) Scale(factor float64) *FloatHistogram { return h } +// Div works like Scale but divides instead of multiplies. +// When dividing by 0, everything will be set to Inf. +func (h *FloatHistogram) Div(scalar float64) *FloatHistogram { + h.ZeroCount /= scalar + h.Count /= scalar + h.Sum /= scalar + for i := range h.PositiveBuckets { + h.PositiveBuckets[i] /= scalar + } + for i := range h.NegativeBuckets { + h.NegativeBuckets[i] /= scalar + } + return h +} + // Add adds the provided other histogram to the receiving histogram. Count, Sum, // and buckets from the other histogram are added to the corresponding // components of the receiving histogram. 
Buckets in the other histogram that do diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index 58aad9645..242ef4c92 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -15,12 +15,13 @@ package histogram import ( "fmt" + "math" "testing" "github.com/stretchr/testify/require" ) -func TestFloatHistogramScale(t *testing.T) { +func TestFloatHistogramMul(t *testing.T) { cases := []struct { name string in *FloatHistogram @@ -33,6 +34,30 @@ func TestFloatHistogramScale(t *testing.T) { 3.1415, &FloatHistogram{}, }, + { + "zero multiplier", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + 0, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 0, + Count: 0, + Sum: 0, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{0, 0, 0, 0}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{0, 0, 0, 0}, + }, + }, { "no-op", &FloatHistogram{ @@ -81,17 +106,137 @@ func TestFloatHistogramScale(t *testing.T) { NegativeBuckets: []float64{6.2, 6, 1.234e5 * 2, 2000}, }, }, + { + "triple", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + 3, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 33, + Count: 90, + Sum: 69, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{3, 0, 9, 12, 21}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{9, 3, 15, 18}, + }, + }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { - require.Equal(t, c.expected, c.in.Scale(c.scale)) + 
require.Equal(t, c.expected, c.in.Mul(c.scale)) // Has it also happened in-place? require.Equal(t, c.expected, c.in) }) } } +func TestFloatHistogramDiv(t *testing.T) { + cases := []struct { + name string + fh *FloatHistogram + s float64 + expected *FloatHistogram + }{ + { + "zero value", + &FloatHistogram{}, + 3.1415, + &FloatHistogram{}, + }, + { + "zero divisor", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + 0, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: math.Inf(1), + Count: math.Inf(1), + Sum: math.Inf(1), + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1), math.Inf(1)}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1), math.Inf(1)}, + }, + }, + { + "no-op", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + 1, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + }, + { + "half", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + 2, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 15, + Sum: 11.5, + 
PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{0.5, 0, 1.5, 2, 3.5}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{1.5, 0.5, 2.5, 3}, + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + require.Equal(t, c.expected, c.fh.Div(c.s)) + // Has it also happened in-place? + require.Equal(t, c.expected, c.fh) + }) + } +} + func TestFloatHistogramDetectReset(t *testing.T) { cases := []struct { name string diff --git a/promql/engine.go b/promql/engine.go index ae46f6005..8a64fdf39 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -2263,14 +2263,11 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * insertedSigs[insertSig] = struct{}{} } - if (hl != nil && hr != nil) || (hl == nil && hr == nil) { - // Both lhs and rhs are of same type. - enh.Out = append(enh.Out, Sample{ - Metric: metric, - F: floatValue, - H: histogramValue, - }) - } + enh.Out = append(enh.Out, Sample{ + Metric: metric, + F: floatValue, + H: histogramValue, + }) } return enh.Out } @@ -2337,28 +2334,33 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V // VectorscalarBinop evaluates a binary operation between a Vector and a Scalar. func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) Vector { for _, lhsSample := range lhs { - lv, rv := lhsSample.F, rhs.V + lf, rf := lhsSample.F, rhs.V + var rh *histogram.FloatHistogram + lh := lhsSample.H // lhs always contains the Vector. If the original position was different // swap for calculating the value. if swap { - lv, rv = rv, lv + lf, rf = rf, lf + lh, rh = rh, lh } - value, _, keep := vectorElemBinop(op, lv, rv, nil, nil) + float, histogram, keep := vectorElemBinop(op, lf, rf, lh, rh) // Catch cases where the scalar is the LHS in a scalar-vector comparison operation. 
// We want to always keep the vector element value as the output value, even if it's on the RHS. if op.IsComparisonOperator() && swap { - value = rv + float = rf + histogram = rh } if returnBool { if keep { - value = 1.0 + float = 1.0 } else { - value = 0.0 + float = 0.0 } keep = true } if keep { - lhsSample.F = value + lhsSample.F = float + lhsSample.H = histogram if shouldDropMetricName(op) || returnBool { lhsSample.Metric = enh.DropMetricName(lhsSample.Metric) } @@ -2413,16 +2415,33 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram // The histogram being added must have the larger schema // code (i.e. the higher resolution). if hrhs.Schema >= hlhs.Schema { - return 0, hlhs.Copy().Add(hrhs), true + return 0, hlhs.Copy().Add(hrhs).Compact(0), true } - return 0, hrhs.Copy().Add(hlhs), true + return 0, hrhs.Copy().Add(hlhs).Compact(0), true } return lhs + rhs, nil, true case parser.SUB: + if hlhs != nil && hrhs != nil { + // The histogram being subtracted must have the larger schema + // code (i.e. the higher resolution). 
+ if hrhs.Schema >= hlhs.Schema { + return 0, hlhs.Copy().Sub(hrhs).Compact(0), true + } + return 0, hrhs.Copy().Mul(-1).Add(hlhs).Compact(0), true + } return lhs - rhs, nil, true case parser.MUL: + if hlhs != nil && hrhs == nil { + return 0, hlhs.Copy().Mul(rhs), true + } + if hlhs == nil && hrhs != nil { + return 0, hrhs.Copy().Mul(lhs), true + } return lhs * rhs, nil, true case parser.DIV: + if hlhs != nil && hrhs == nil { + return 0, hlhs.Copy().Div(rhs), true + } return lhs / rhs, nil, true case parser.POW: return math.Pow(lhs, rhs), nil, true @@ -2452,7 +2471,8 @@ type groupedAggregation struct { labels labels.Labels floatValue float64 histogramValue *histogram.FloatHistogram - mean float64 + floatMean float64 + histogramMean *histogram.FloatHistogram groupCount int heap vectorByValueHeap reverseHeap vectorByReverseValueHeap @@ -2536,7 +2556,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without newAgg := &groupedAggregation{ labels: m, floatValue: s.F, - mean: s.F, + floatMean: s.F, groupCount: 1, } switch { @@ -2545,6 +2565,11 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without case op == parser.SUM: newAgg.histogramValue = s.H.Copy() newAgg.hasHistogram = true + case op == parser.AVG: + newAgg.histogramMean = s.H.Copy() + newAgg.hasHistogram = true + case op == parser.STDVAR || op == parser.STDDEV: + newAgg.groupCount = 0 } result[groupingKey] = newAgg @@ -2589,9 +2614,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without if s.H.Schema >= group.histogramValue.Schema { group.histogramValue.Add(s.H) } else { - h := s.H.Copy() - h.Add(group.histogramValue) - group.histogramValue = h + group.histogramValue = s.H.Copy().Add(group.histogramValue) } } // Otherwise the aggregation contained floats @@ -2604,25 +2627,46 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without case parser.AVG: group.groupCount++ - if math.IsInf(group.mean, 0) { - if 
math.IsInf(s.F, 0) && (group.mean > 0) == (s.F > 0) { - // The `mean` and `s.V` values are `Inf` of the same sign. They - // can't be subtracted, but the value of `mean` is correct - // already. - break + if s.H != nil { + group.hasHistogram = true + if group.histogramMean != nil { + left := s.H.Copy().Div(float64(group.groupCount)) + right := group.histogramMean.Copy().Div(float64(group.groupCount)) + // The histogram being added/subtracted must have + // an equal or larger schema. + if s.H.Schema >= group.histogramMean.Schema { + toAdd := right.Mul(-1).Add(left) + group.histogramMean.Add(toAdd) + } else { + toAdd := left.Sub(right) + group.histogramMean = toAdd.Add(group.histogramMean) + } } - if !math.IsInf(s.F, 0) && !math.IsNaN(s.F) { - // At this stage, the mean is an infinite. If the added - // value is neither an Inf or a Nan, we can keep that mean - // value. - // This is required because our calculation below removes - // the mean value, which would look like Inf += x - Inf and - // end up as a NaN. - break + // Otherwise the aggregation contained floats + // previously and will be invalid anyway. No + // point in copying the histogram in that case. + } else { + group.hasFloat = true + if math.IsInf(group.floatMean, 0) { + if math.IsInf(s.F, 0) && (group.floatMean > 0) == (s.F > 0) { + // The `floatMean` and `s.F` values are `Inf` of the same sign. They + // can't be subtracted, but the value of `floatMean` is correct + // already. + break + } + if !math.IsInf(s.F, 0) && !math.IsNaN(s.F) { + // At this stage, the mean is an infinite. If the added + // value is neither an Inf or a Nan, we can keep that mean + // value. + // This is required because our calculation below removes + // the mean value, which would look like Inf += x - Inf and + // end up as a NaN. + break + } } + // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. 
+ group.floatMean += s.F/float64(group.groupCount) - group.floatMean/float64(group.groupCount) } - // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. - group.mean += s.F/float64(group.groupCount) - group.mean/float64(group.groupCount) case parser.GROUP: // Do nothing. Required to avoid the panic in `default:` below. @@ -2641,10 +2685,12 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without group.groupCount++ case parser.STDVAR, parser.STDDEV: - group.groupCount++ - delta := s.F - group.mean - group.mean += delta / float64(group.groupCount) - group.floatValue += delta * (s.F - group.mean) + if s.H == nil { // Ignore native histograms. + group.groupCount++ + delta := s.F - group.floatMean + group.floatMean += delta / float64(group.groupCount) + group.floatValue += delta * (s.F - group.floatMean) + } case parser.TOPK: // We build a heap of up to k elements, with the smallest element at heap[0]. @@ -2696,7 +2742,16 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without for _, aggr := range orderedResult { switch op { case parser.AVG: - aggr.floatValue = aggr.mean + if aggr.hasFloat && aggr.hasHistogram { + // We cannot aggregate histogram sample with a float64 sample. + // TODO(zenador): Issue warning when plumbing is in place. + continue + } + if aggr.hasHistogram { + aggr.histogramValue = aggr.histogramMean.Compact(0) + } else { + aggr.floatValue = aggr.floatMean + } case parser.COUNT, parser.COUNT_VALUES: aggr.floatValue = float64(aggr.groupCount) @@ -2739,8 +2794,12 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without case parser.SUM: if aggr.hasFloat && aggr.hasHistogram { // We cannot aggregate histogram sample with a float64 sample. + // TODO(zenador): Issue warning when plumbing is in place. continue } + if aggr.hasHistogram { + aggr.histogramValue.Compact(0) + } default: // For other aggregations, we already have the right value. 
} diff --git a/promql/engine_test.go b/promql/engine_test.go index cfe3694dd..72cbf9153 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -3966,21 +3966,23 @@ func TestNativeHistogram_HistogramFraction(t *testing.T) { } } -func TestNativeHistogram_Sum_Count_AddOperator(t *testing.T) { +func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { // TODO(codesome): Integrate histograms into the PromQL testing framework // and write more tests there. cases := []struct { - histograms []histogram.Histogram - expected histogram.FloatHistogram + histograms []histogram.Histogram + expected histogram.FloatHistogram + expectedAvg histogram.FloatHistogram }{ { histograms: []histogram.Histogram{ { - Schema: 0, - Count: 21, - Sum: 1234.5, - ZeroThreshold: 0.001, - ZeroCount: 4, + CounterResetHint: histogram.GaugeType, + Schema: 0, + Count: 21, + Sum: 1234.5, + ZeroThreshold: 0.001, + ZeroCount: 4, PositiveSpans: []histogram.Span{ {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, @@ -3992,6 +3994,182 @@ func TestNativeHistogram_Sum_Count_AddOperator(t *testing.T) { }, NegativeBuckets: []int64{2, 2, -3, 8}, }, + { + CounterResetHint: histogram.GaugeType, + Schema: 0, + Count: 36, + Sum: 2345.6, + ZeroThreshold: 0.001, + ZeroCount: 5, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 2, Length: 3}, + }, + NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, + }, + { + CounterResetHint: histogram.GaugeType, + Schema: 0, + Count: 36, + Sum: 1111.1, + ZeroThreshold: 0.001, + ZeroCount: 5, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 2, 
Length: 3}, + }, + NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, + }, + { + CounterResetHint: histogram.GaugeType, + Schema: 1, // Everything is 0 just to make the count 4 so avg has nicer numbers. + }, + }, + expected: histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 0, + ZeroThreshold: 0.001, + ZeroCount: 14, + Count: 93, + Sum: 4691.2, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 7}, + }, + PositiveBuckets: []float64{3, 8, 2, 5, 3, 2, 2}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 6}, + {Offset: 3, Length: 3}, + }, + NegativeBuckets: []float64{2, 6, 8, 4, 15, 9, 10, 10, 4}, + }, + expectedAvg: histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 0, + ZeroThreshold: 0.001, + ZeroCount: 3.5, + Count: 23.25, + Sum: 1172.8, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 7}, + }, + PositiveBuckets: []float64{0.75, 2, 0.5, 1.25, 0.75, 0.5, 0.5}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 6}, + {Offset: 3, Length: 3}, + }, + NegativeBuckets: []float64{0.5, 1.5, 2, 1, 3.75, 2.25, 2.5, 2.5, 1}, + }, + }, + } + + idx0 := int64(0) + for _, c := range cases { + for _, floatHisto := range []bool{true, false} { + t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { + test, err := NewTest(t, "") + require.NoError(t, err) + t.Cleanup(test.Close) + + seriesName := "sparse_histogram_series" + seriesNameOverTime := "sparse_histogram_series_over_time" + + engine := test.QueryEngine() + + ts := idx0 * int64(10*time.Minute/time.Millisecond) + app := test.Storage().Appender(context.TODO()) + for idx1, h := range c.histograms { + lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1)) + // Since we mutate h later, we need to create a copy here. 
+ if floatHisto { + _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat()) + } else { + _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) + } + require.NoError(t, err) + + lbls = labels.FromStrings("__name__", seriesNameOverTime) + newTs := ts + int64(idx1)*int64(time.Minute/time.Millisecond) + // Since we mutate h later, we need to create a copy here. + if floatHisto { + _, err = app.AppendHistogram(0, lbls, newTs, nil, h.Copy().ToFloat()) + } else { + _, err = app.AppendHistogram(0, lbls, newTs, h.Copy(), nil) + } + require.NoError(t, err) + } + require.NoError(t, app.Commit()) + + queryAndCheck := func(queryString string, ts int64, exp Vector) { + qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res := qry.Exec(test.Context()) + require.NoError(t, res.Err) + + vector, err := res.Vector() + require.NoError(t, err) + + require.Equal(t, exp, vector) + } + + // sum(). + queryString := fmt.Sprintf("sum(%s)", seriesName) + queryAndCheck(queryString, ts, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) + + // + operator. + queryString = fmt.Sprintf(`%s{idx="0"}`, seriesName) + for idx := 1; idx < len(c.histograms); idx++ { + queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx) + } + queryAndCheck(queryString, ts, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) + + // count(). + queryString = fmt.Sprintf("count(%s)", seriesName) + queryAndCheck(queryString, ts, []Sample{{T: ts, F: 4, Metric: labels.EmptyLabels()}}) + + // avg(). + queryString = fmt.Sprintf("avg(%s)", seriesName) + queryAndCheck(queryString, ts, []Sample{{T: ts, H: &c.expectedAvg, Metric: labels.EmptyLabels()}}) + + offset := int64(len(c.histograms) - 1) + newTs := ts + offset*int64(time.Minute/time.Millisecond) + + // sum_over_time(). 
+ queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset) + queryAndCheck(queryString, newTs, []Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels()}}) + + // avg_over_time(). + queryString = fmt.Sprintf("avg_over_time(%s[%dm:1m])", seriesNameOverTime, offset) + queryAndCheck(queryString, newTs, []Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels()}}) + }) + idx0++ + } + } +} + +func TestNativeHistogram_SubOperator(t *testing.T) { + // TODO(codesome): Integrate histograms into the PromQL testing framework + // and write more tests there. + cases := []struct { + histograms []histogram.Histogram + expected histogram.FloatHistogram + }{ + { + histograms: []histogram.Histogram{ { Schema: 0, Count: 36, @@ -4011,10 +4189,117 @@ func TestNativeHistogram_Sum_Count_AddOperator(t *testing.T) { }, NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, }, + { + Schema: 0, + Count: 11, + Sum: 1234.5, + ZeroThreshold: 0.001, + ZeroCount: 3, + PositiveSpans: []histogram.Span{ + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, -1}, + NegativeSpans: []histogram.Span{ + {Offset: 2, Length: 2}, + }, + NegativeBuckets: []int64{3, -1}, + }, + }, + expected: histogram.FloatHistogram{ + Schema: 0, + Count: 25, + Sum: 1111.1, + ZeroThreshold: 0.001, + ZeroCount: 2, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 4}, + }, + PositiveBuckets: []float64{1, 1, 2, 1, 1, 1}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 2}, + {Offset: 1, Length: 1}, + {Offset: 4, Length: 3}, + }, + NegativeBuckets: []float64{1, 1, 7, 5, 5, 2}, + }, + }, + { + histograms: []histogram.Histogram{ { Schema: 0, Count: 36, - Sum: 1111.1, + Sum: 2345.6, + ZeroThreshold: 0.001, + ZeroCount: 5, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 4}, 
+ {Offset: 2, Length: 0}, + {Offset: 2, Length: 3}, + }, + NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, + }, + { + Schema: 1, + Count: 11, + Sum: 1234.5, + ZeroThreshold: 0.001, + ZeroCount: 3, + PositiveSpans: []histogram.Span{ + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, -1}, + NegativeSpans: []histogram.Span{ + {Offset: 2, Length: 2}, + }, + NegativeBuckets: []int64{3, -1}, + }, + }, + expected: histogram.FloatHistogram{ + Schema: 0, + Count: 25, + Sum: 1111.1, + ZeroThreshold: 0.001, + ZeroCount: 2, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 1}, + {Offset: 1, Length: 5}, + }, + PositiveBuckets: []float64{1, 1, 2, 1, 1, 1}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 4}, + {Offset: 4, Length: 3}, + }, + NegativeBuckets: []float64{-2, 2, 2, 7, 5, 5, 2}, + }, + }, + { + histograms: []histogram.Histogram{ + { + Schema: 1, + Count: 11, + Sum: 1234.5, + ZeroThreshold: 0.001, + ZeroCount: 3, + PositiveSpans: []histogram.Span{ + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, -1}, + NegativeSpans: []histogram.Span{ + {Offset: 2, Length: 2}, + }, + NegativeBuckets: []int64{3, -1}, + }, + { + Schema: 0, + Count: 36, + Sum: 2345.6, ZeroThreshold: 0.001, ZeroCount: 5, PositiveSpans: []histogram.Span{ @@ -4033,21 +4318,20 @@ func TestNativeHistogram_Sum_Count_AddOperator(t *testing.T) { }, expected: histogram.FloatHistogram{ Schema: 0, + Count: -25, + Sum: -1111.1, ZeroThreshold: 0.001, - ZeroCount: 14, - Count: 93, - Sum: 4691.2, + ZeroCount: -2, PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - {Offset: 0, Length: 4}, + {Offset: 0, Length: 1}, + {Offset: 1, Length: 5}, }, - PositiveBuckets: []float64{3, 8, 2, 5, 3, 2, 2}, + PositiveBuckets: []float64{-1, -1, -2, -1, -1, -1}, NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 4}, - {Offset: 0, Length: 2}, - {Offset: 3, Length: 3}, + {Offset: 1, Length: 4}, + {Offset: 4, Length: 3}, }, - NegativeBuckets: []float64{2, 6, 8, 4, 15, 9, 10, 10, 4}, + 
NegativeBuckets: []float64{2, -2, -2, -7, -5, -5, -2}, }, }, } @@ -4091,20 +4375,177 @@ func TestNativeHistogram_Sum_Count_AddOperator(t *testing.T) { require.Equal(t, exp, vector) } - // sum(). - queryString := fmt.Sprintf("sum(%s)", seriesName) - queryAndCheck(queryString, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) - - // + operator. - queryString = fmt.Sprintf(`%s{idx="0"}`, seriesName) + // - operator. + queryString := fmt.Sprintf(`%s{idx="0"}`, seriesName) for idx := 1; idx < len(c.histograms); idx++ { - queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx) + queryString += fmt.Sprintf(` - ignoring(idx) %s{idx="%d"}`, seriesName, idx) } queryAndCheck(queryString, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) + }) + idx0++ + } + } +} - // count(). - queryString = fmt.Sprintf("count(%s)", seriesName) - queryAndCheck(queryString, []Sample{{T: ts, F: 3, Metric: labels.EmptyLabels()}}) +func TestNativeHistogram_MulDivOperator(t *testing.T) { + // TODO(codesome): Integrate histograms into the PromQL testing framework + // and write more tests there. 
+ originalHistogram := histogram.Histogram{ + Schema: 0, + Count: 21, + Sum: 33, + ZeroThreshold: 0.001, + ZeroCount: 3, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{3, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + NegativeBuckets: []int64{3, 0, 0}, + } + + cases := []struct { + scalar float64 + histogram histogram.Histogram + expectedMul histogram.FloatHistogram + expectedDiv histogram.FloatHistogram + }{ + { + scalar: 3, + histogram: originalHistogram, + expectedMul: histogram.FloatHistogram{ + Schema: 0, + Count: 63, + Sum: 99, + ZeroThreshold: 0.001, + ZeroCount: 9, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []float64{9, 9, 9}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + NegativeBuckets: []float64{9, 9, 9}, + }, + expectedDiv: histogram.FloatHistogram{ + Schema: 0, + Count: 7, + Sum: 11, + ZeroThreshold: 0.001, + ZeroCount: 1, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []float64{1, 1, 1}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + NegativeBuckets: []float64{1, 1, 1}, + }, + }, + { + scalar: 0, + histogram: originalHistogram, + expectedMul: histogram.FloatHistogram{ + Schema: 0, + Count: 0, + Sum: 0, + ZeroThreshold: 0.001, + ZeroCount: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []float64{0, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + NegativeBuckets: []float64{0, 0, 0}, + }, + expectedDiv: histogram.FloatHistogram{ + Schema: 0, + Count: math.Inf(1), + Sum: math.Inf(1), + ZeroThreshold: 0.001, + ZeroCount: math.Inf(1), + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1)}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + NegativeBuckets: []float64{math.Inf(1), math.Inf(1), 
math.Inf(1)}, + }, + }, + } + + idx0 := int64(0) + for _, c := range cases { + for _, floatHisto := range []bool{true, false} { + t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { + test, err := NewTest(t, "") + require.NoError(t, err) + t.Cleanup(test.Close) + + seriesName := "sparse_histogram_series" + floatSeriesName := "float_series" + + engine := test.QueryEngine() + + ts := idx0 * int64(10*time.Minute/time.Millisecond) + app := test.Storage().Appender(context.TODO()) + h := c.histogram + lbls := labels.FromStrings("__name__", seriesName) + // Since we mutate h later, we need to create a copy here. + if floatHisto { + _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat()) + } else { + _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) + } + require.NoError(t, err) + _, err = app.Append(0, labels.FromStrings("__name__", floatSeriesName), ts, c.scalar) + require.NoError(t, err) + require.NoError(t, app.Commit()) + + queryAndCheck := func(queryString string, exp Vector) { + qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res := qry.Exec(test.Context()) + require.NoError(t, res.Err) + + vector, err := res.Vector() + require.NoError(t, err) + + require.Equal(t, exp, vector) + } + + // histogram * scalar. + queryString := fmt.Sprintf(`%s * %f`, seriesName, c.scalar) + queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) + + // scalar * histogram. + queryString = fmt.Sprintf(`%f * %s`, c.scalar, seriesName) + queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) + + // histogram * float. + queryString = fmt.Sprintf(`%s * %s`, seriesName, floatSeriesName) + queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) + + // float * histogram. 
+ queryString = fmt.Sprintf(`%s * %s`, floatSeriesName, seriesName) + queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) + + // histogram / scalar. + queryString = fmt.Sprintf(`%s / %f`, seriesName, c.scalar) + queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}}) + + // histogram / float. + queryString = fmt.Sprintf(`%s / %s`, seriesName, floatSeriesName) + queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}}) }) idx0++ } diff --git a/promql/functions.go b/promql/functions.go index df29d6c5d..96bffab96 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -162,7 +162,7 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod if resultHistogram == nil { resultFloat *= factor } else { - resultHistogram.Scale(factor) + resultHistogram.Mul(factor) } return append(enh.Out, Sample{F: resultFloat, H: resultHistogram}) @@ -443,15 +443,40 @@ func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) return append(enh.Out, Sample{F: aggrFn(el)}) } +func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) *histogram.FloatHistogram) Vector { + el := vals[0].(Matrix)[0] + + return append(enh.Out, Sample{H: aggrFn(el)}) +} + // === avg_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - if len(vals[0].(Matrix)[0].Floats) == 0 { - // TODO(beorn7): The passed values only contain - // histograms. avg_over_time ignores histograms for now. If - // there are only histograms, we have to return without adding - // anything to enh.Out. + if len(vals[0].(Matrix)[0].Floats) > 0 && len(vals[0].(Matrix)[0].Histograms) > 0 { + // TODO(zenador): Add warning for mixed floats and histograms. 
return enh.Out } + if len(vals[0].(Matrix)[0].Floats) == 0 { + // The passed values only contain histograms. + return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram { + count := 1 + mean := s.Histograms[0].H.Copy() + for _, h := range s.Histograms[1:] { + count++ + left := h.H.Copy().Div(float64(count)) + right := mean.Copy().Div(float64(count)) + // The histogram being added/subtracted must have + // an equal or larger schema. + if h.H.Schema >= mean.Schema { + toAdd := right.Mul(-1).Add(left) + mean.Add(toAdd) + } else { + toAdd := left.Sub(right) + mean = toAdd.Add(mean) + } + } + return mean + }) + } return aggrOverTime(vals, enh, func(s Series) float64 { var mean, count, c float64 for _, f := range s.Floats { @@ -558,13 +583,26 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode // === sum_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - if len(vals[0].(Matrix)[0].Floats) == 0 { - // TODO(beorn7): The passed values only contain - // histograms. sum_over_time ignores histograms for now. If - // there are only histograms, we have to return without adding - // anything to enh.Out. + if len(vals[0].(Matrix)[0].Floats) > 0 && len(vals[0].(Matrix)[0].Histograms) > 0 { + // TODO(zenador): Add warning for mixed floats and histograms. return enh.Out } + if len(vals[0].(Matrix)[0].Floats) == 0 { + // The passed values only contain histograms. + return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram { + sum := s.Histograms[0].H.Copy() + for _, h := range s.Histograms[1:] { + // The histogram being added must have + // an equal or larger schema. 
+ if h.H.Schema >= sum.Schema { + sum.Add(h.H) + } else { + sum = h.H.Copy().Add(sum) + } + } + return sum + }) + } return aggrOverTime(vals, enh, func(s Series) float64 { var sum, c float64 for _, f := range s.Floats { From 0dc31ade41e4693b1f30ae2ca35648a92e0583f1 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Wed, 17 May 2023 00:14:58 +0200 Subject: [PATCH 146/632] Add support for consul path_prefix Signed-off-by: Julien Pivotto --- config/config_test.go | 1 + config/testdata/conf.good.yml | 1 + discovery/consul/consul.go | 2 ++ docs/configuration/configuration.md | 2 ++ 4 files changed, 6 insertions(+) diff --git a/config/config_test.go b/config/config_test.go index 3ee327c5f..bde09dfec 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -363,6 +363,7 @@ var expectedConf = &Config{ ServiceDiscoveryConfigs: discovery.Configs{ &consul.SDConfig{ Server: "localhost:1234", + PathPrefix: "/consul", Token: "mysecret", Services: []string{"nginx", "cache", "mysql"}, ServiceTags: []string{"canary", "v1"}, diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index 764f1a342..388b9de32 100644 --- a/config/testdata/conf.good.yml +++ b/config/testdata/conf.good.yml @@ -151,6 +151,7 @@ scrape_configs: consul_sd_configs: - server: "localhost:1234" token: mysecret + path_prefix: /consul services: ["nginx", "cache", "mysql"] tags: ["canary", "v1"] node_meta: diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go index c59bd1f5d..99ea396b9 100644 --- a/discovery/consul/consul.go +++ b/discovery/consul/consul.go @@ -111,6 +111,7 @@ func init() { // SDConfig is the configuration for Consul service discovery. 
type SDConfig struct { Server string `yaml:"server,omitempty"` + PathPrefix string `yaml:"path_prefix,omitempty"` Token config.Secret `yaml:"token,omitempty"` Datacenter string `yaml:"datacenter,omitempty"` Namespace string `yaml:"namespace,omitempty"` @@ -211,6 +212,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { clientConf := &consul.Config{ Address: conf.Server, + PathPrefix: conf.PathPrefix, Scheme: conf.Scheme, Datacenter: conf.Datacenter, Namespace: conf.Namespace, diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index c74c9d478..b0b587e02 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -585,6 +585,8 @@ The following meta labels are available on targets during [relabeling](#relabel_ # The information to access the Consul API. It is to be defined # as the Consul documentation requires. [ server: | default = "localhost:8500" ] +# Prefix for URIs for when consul is behind an API gateway (reverse proxy). +[ path_prefix: ] [ token: ] [ datacenter: ] # Namespaces are only supported in Consul Enterprise. From f26760cf32b9f64871568ff4e7c78cd4f325a621 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 May 2023 22:30:22 +0000 Subject: [PATCH 147/632] build(deps): bump github.com/hetznercloud/hcloud-go Bumps [github.com/hetznercloud/hcloud-go](https://github.com/hetznercloud/hcloud-go) from 1.43.0 to 1.45.1. - [Release notes](https://github.com/hetznercloud/hcloud-go/releases) - [Changelog](https://github.com/hetznercloud/hcloud-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/hetznercloud/hcloud-go/compare/v1.43.0...v1.45.1) --- updated-dependencies: - dependency-name: github.com/hetznercloud/hcloud-go dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 36065b21d..c90285438 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/consul/api v1.20.0 github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 - github.com/hetznercloud/hcloud-go v1.43.0 + github.com/hetznercloud/hcloud-go v1.45.1 github.com/ionos-cloud/sdk-go/v6 v6.1.6 github.com/json-iterator/go v1.1.12 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b @@ -41,7 +41,7 @@ require ( github.com/ovh/go-ovh v1.4.1 github.com/pkg/errors v0.9.1 github.com/prometheus/alertmanager v0.25.0 - github.com/prometheus/client_golang v1.15.0 + github.com/prometheus/client_golang v1.15.1 github.com/prometheus/client_model v0.3.0 github.com/prometheus/common v0.42.0 github.com/prometheus/common/assets v0.2.0 diff --git a/go.sum b/go.sum index dc4488872..9ecc5c727 100644 --- a/go.sum +++ b/go.sum @@ -456,8 +456,8 @@ github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197/go.mod h1:2TCr github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go v1.43.0 h1:m4p5+mz32Tt+bHkNQEg9RQdtMIu+SUdMjs29LsOJjUk= -github.com/hetznercloud/hcloud-go v1.43.0/go.mod h1:DPs7Dvae8LrTVOWyq2abwQQOwfpfICAzKHm2HQMU5/E= +github.com/hetznercloud/hcloud-go v1.45.1 h1:nl0OOklFfQT5J6AaNIOhl5Ruh3fhmGmhvZEqHbibVuk= +github.com/hetznercloud/hcloud-go v1.45.1/go.mod h1:aAUGxSfSnB8/lVXHNEDxtCT1jykaul8kqjD7f5KQXF8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle 
v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -647,8 +647,8 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM= -github.com/prometheus/client_golang v1.15.0/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= +github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= From 80b7f73d267a812b3689321554aec637b75f468d Mon Sep 17 00:00:00 2001 From: Xiaochao Dong Date: Wed, 17 May 2023 21:15:12 +0800 Subject: [PATCH 148/632] Copy tombstone intervals to avoid race (#12245) Signed-off-by: Xiaochao Dong (@damnever) --- tsdb/querier_test.go | 40 ++++++++++++++++++++++++++++++ tsdb/tombstones/tombstones.go | 32 +++++++++++++++++------- tsdb/tombstones/tombstones_test.go | 36 +++++++++++++++++++++++++++ 3 files changed, 99 insertions(+), 9 deletions(-) diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 9e17f5b8d..6802fc250 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -501,6 +501,46 @@ func TestBlockQuerier_AgainstHeadWithOpenChunks(t *testing.T) { } } +func 
TestBlockQuerier_TrimmingDoesNotModifyOriginalTombstoneIntervals(t *testing.T) { + c := blockQuerierTestCase{ + mint: 2, + maxt: 6, + ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", "a")}, + exp: newMockSeriesSet([]storage.Series{ + storage.NewListSeries(labels.FromStrings("a", "a"), + []tsdbutil.Sample{sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}}, + ), + storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"), + []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, + ), + }), + expChks: newMockChunkSeriesSet([]storage.ChunkSeries{ + storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"), + []tsdbutil.Sample{sample{3, 4, nil, nil}}, []tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}}, + ), + storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"), + []tsdbutil.Sample{sample{3, 3, nil, nil}}, []tsdbutil.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, + ), + }), + } + ir, cr, _, _ := createIdxChkReaders(t, testData) + stones := tombstones.NewMemTombstones() + p, err := ir.Postings("a", "a") + require.NoError(t, err) + refs, err := index.ExpandPostings(p) + require.NoError(t, err) + for _, ref := range refs { + stones.AddInterval(ref, tombstones.Interval{Mint: 1, Maxt: 2}) + } + testBlockQuerier(t, c, ir, cr, stones) + for _, ref := range refs { + intervals, err := stones.Get(ref) + require.NoError(t, err) + // Without copy, the intervals could be [math.MinInt64, 2]. 
+ require.Equal(t, tombstones.Intervals{{Mint: 1, Maxt: 2}}, intervals) + } +} + var testData = []seriesSamples{ { lset: map[string]string{"a": "a"}, diff --git a/tsdb/tombstones/tombstones.go b/tsdb/tombstones/tombstones.go index a52e1caa9..94daf5195 100644 --- a/tsdb/tombstones/tombstones.go +++ b/tsdb/tombstones/tombstones.go @@ -18,6 +18,7 @@ import ( "fmt" "hash" "hash/crc32" + "math" "os" "path/filepath" "sort" @@ -252,7 +253,14 @@ func NewTestMemTombstones(intervals []Intervals) *MemTombstones { func (t *MemTombstones) Get(ref storage.SeriesRef) (Intervals, error) { t.mtx.RLock() defer t.mtx.RUnlock() - return t.intvlGroups[ref], nil + intervals, ok := t.intvlGroups[ref] + if !ok { + return nil, nil + } + // Make a copy to avoid race. + res := make(Intervals, len(intervals)) + copy(res, intervals) + return res, nil } func (t *MemTombstones) DeleteTombstones(refs map[storage.SeriesRef]struct{}) { @@ -349,17 +357,23 @@ func (in Intervals) Add(n Interval) Intervals { // Find min and max indexes of intervals that overlap with the new interval. // Intervals are closed [t1, t2] and t is discreet, so if neighbour intervals are 1 step difference // to the new one, we can merge those together. - mini := sort.Search(len(in), func(i int) bool { return in[i].Maxt >= n.Mint-1 }) - if mini == len(in) { - return append(in, n) + mini := 0 + if n.Mint != math.MinInt64 { // Avoid overflow. + mini = sort.Search(len(in), func(i int) bool { return in[i].Maxt >= n.Mint-1 }) + if mini == len(in) { + return append(in, n) + } } - maxi := sort.Search(len(in)-mini, func(i int) bool { return in[mini+i].Mint > n.Maxt+1 }) - if maxi == 0 { - if mini == 0 { - return append(Intervals{n}, in...) + maxi := len(in) + if n.Maxt != math.MaxInt64 { // Avoid overflow. + maxi = sort.Search(len(in)-mini, func(i int) bool { return in[mini+i].Mint > n.Maxt+1 }) + if maxi == 0 { + if mini == 0 { + return append(Intervals{n}, in...) + } + return append(in[:mini], append(Intervals{n}, in[mini:]...)...) 
} - return append(in[:mini], append(Intervals{n}, in[mini:]...)...) } if n.Mint < in[mini].Mint { diff --git a/tsdb/tombstones/tombstones_test.go b/tsdb/tombstones/tombstones_test.go index ffe25b0be..36c9f1c1e 100644 --- a/tsdb/tombstones/tombstones_test.go +++ b/tsdb/tombstones/tombstones_test.go @@ -81,6 +81,22 @@ func TestDeletingTombstones(t *testing.T) { require.Empty(t, intervals) } +func TestTombstonesGetWithCopy(t *testing.T) { + stones := NewMemTombstones() + stones.AddInterval(1, Intervals{{Mint: 1, Maxt: 2}, {Mint: 7, Maxt: 8}, {Mint: 11, Maxt: 12}}...) + + intervals0, err := stones.Get(1) + require.NoError(t, err) + require.Equal(t, Intervals{{Mint: 1, Maxt: 2}, {Mint: 7, Maxt: 8}, {Mint: 11, Maxt: 12}}, intervals0) + intervals1 := intervals0.Add(Interval{Mint: 4, Maxt: 6}) + require.Equal(t, Intervals{{Mint: 1, Maxt: 2}, {Mint: 4, Maxt: 8}, {Mint: 11, Maxt: 12}}, intervals0) // Original slice changed. + require.Equal(t, Intervals{{Mint: 1, Maxt: 2}, {Mint: 4, Maxt: 8}, {Mint: 11, Maxt: 12}}, intervals1) + + intervals2, err := stones.Get(1) + require.NoError(t, err) + require.Equal(t, Intervals{{Mint: 1, Maxt: 2}, {Mint: 7, Maxt: 8}, {Mint: 11, Maxt: 12}}, intervals2) +} + func TestTruncateBefore(t *testing.T) { cases := []struct { before Intervals @@ -210,6 +226,26 @@ func TestAddingNewIntervals(t *testing.T) { new: Interval{math.MinInt64, 10}, exp: Intervals{{math.MinInt64, math.MaxInt64}}, }, + { + exist: Intervals{{9, 10}}, + new: Interval{math.MinInt64, 7}, + exp: Intervals{{math.MinInt64, 7}, {9, 10}}, + }, + { + exist: Intervals{{9, 10}}, + new: Interval{12, math.MaxInt64}, + exp: Intervals{{9, 10}, {12, math.MaxInt64}}, + }, + { + exist: Intervals{{9, 10}}, + new: Interval{math.MinInt64, 8}, + exp: Intervals{{math.MinInt64, 10}}, + }, + { + exist: Intervals{{9, 10}}, + new: Interval{11, math.MaxInt64}, + exp: Intervals{{9, math.MaxInt64}}, + }, } for _, c := range cases { From f731a90a7f33cf19ea8a63d889a1830a05c0236d Mon Sep 17 00:00:00 2001 
From: Baskar Shanmugam <123750249+codebasky@users.noreply.github.com> Date: Fri, 19 May 2023 13:06:30 +0530 Subject: [PATCH 149/632] Fix LabelValueStats in posting stats (#12342) Problem: LabelValueStats - This will provide a list of the label names and memory used in bytes. It is calculated by adding the length of all values for a given label name. But internally Prometheus stores the name and the value independently for each series. Solution: MemPostings struct maintains the values to seriesRef map which is used to get the number of series which contains the label values. Using that LabelValueStats is calculated as: seriesCnt * len(value name) Signed-off-by: Baskar Shanmugam --- tsdb/index/postings.go | 5 +++-- tsdb/index/postings_test.go | 29 +++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index b93d3a202..200100239 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -184,8 +184,9 @@ func (p *MemPostings) Stats(label string) *PostingsStats { if n == label { metrics.push(Stat{Name: name, Count: uint64(len(values))}) } - labelValuePairs.push(Stat{Name: n + "=" + name, Count: uint64(len(values))}) - size += uint64(len(name)) + seriesCnt := uint64(len(values)) + labelValuePairs.push(Stat{Name: n + "=" + name, Count: seriesCnt}) + size += uint64(len(name)) * seriesCnt } labelValueLength.push(Stat{Name: n, Count: size}) } diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index a34f3c12d..9d8b5ebf3 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -916,6 +916,35 @@ func BenchmarkPostings_Stats(b *testing.B) { } } +func TestMemPostingsStats(t *testing.T) { + // create a new MemPostings + p := NewMemPostings() + + // add some postings to the MemPostings + p.Add(1, labels.FromStrings("label", "value1")) + p.Add(1, labels.FromStrings("label", "value2")) + p.Add(1, labels.FromStrings("label", "value3")) + p.Add(2, 
labels.FromStrings("label", "value1")) + + // call the Stats method to calculate the cardinality statistics + stats := p.Stats("label") + + // assert that the expected statistics were calculated + require.Equal(t, uint64(2), stats.CardinalityMetricsStats[0].Count) + require.Equal(t, "value1", stats.CardinalityMetricsStats[0].Name) + + require.Equal(t, uint64(3), stats.CardinalityLabelStats[0].Count) + require.Equal(t, "label", stats.CardinalityLabelStats[0].Name) + + require.Equal(t, uint64(24), stats.LabelValueStats[0].Count) + require.Equal(t, "label", stats.LabelValueStats[0].Name) + + require.Equal(t, uint64(2), stats.LabelValuePairsStats[0].Count) + require.Equal(t, "label=value1", stats.LabelValuePairsStats[0].Name) + + require.Equal(t, 3, stats.NumLabelPairs) +} + func TestMemPostings_Delete(t *testing.T) { p := NewMemPostings() p.Add(1, labels.FromStrings("lbl1", "a")) From 92d69803606620505ed8f0226abb640a121ec1cd Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Fri, 19 May 2023 10:24:06 +0200 Subject: [PATCH 150/632] Fix populateWithDelChunkSeriesIterator and gauge histograms (#12330) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use AppendableGauge to detect corrupt chunk with gauge histograms. Detect if first sample is a gauge but the chunk is not set up to contain gauge histograms. Signed-off-by: György Krajcsovits Signed-off-by: George Krajcsovits --- tsdb/head_read.go | 8 +- tsdb/querier.go | 137 ++++++++++++++----- tsdb/querier_test.go | 274 +++++++++++++++++++++++++++++++++++++ tsdb/tsdbutil/chunks.go | 14 +- tsdb/tsdbutil/histogram.go | 10 ++ 5 files changed, 404 insertions(+), 39 deletions(-) diff --git a/tsdb/head_read.go b/tsdb/head_read.go index 9c546ab16..0e6e005ea 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -331,7 +331,7 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc. 
} s.Unlock() - return &safeChunk{ + return &safeHeadChunk{ Chunk: chk, s: s, cid: cid, @@ -627,15 +627,15 @@ func (b boundedIterator) Seek(t int64) chunkenc.ValueType { return b.Iterator.Seek(t) } -// safeChunk makes sure that the chunk can be accessed without a race condition -type safeChunk struct { +// safeHeadChunk makes sure that the chunk can be accessed without a race condition +type safeHeadChunk struct { chunkenc.Chunk s *memSeries cid chunks.HeadChunkID isoState *isolationState } -func (c *safeChunk) Iterator(reuseIter chunkenc.Iterator) chunkenc.Iterator { +func (c *safeHeadChunk) Iterator(reuseIter chunkenc.Iterator) chunkenc.Iterator { c.s.Lock() it := c.s.iterator(c.cid, c.Chunk, c.isoState, reuseIter) c.s.Unlock() diff --git a/tsdb/querier.go b/tsdb/querier.go index 8b17bb745..8bcd451f3 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -785,14 +785,35 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool { if app, err = newChunk.Appender(); err != nil { break } - if hc, ok := p.currChkMeta.Chunk.(*chunkenc.HistogramChunk); ok { + + switch hc := p.currChkMeta.Chunk.(type) { + case *chunkenc.HistogramChunk: newChunk.(*chunkenc.HistogramChunk).SetCounterResetHeader(hc.GetCounterResetHeader()) + case *safeHeadChunk: + if unwrapped, ok := hc.Chunk.(*chunkenc.HistogramChunk); ok { + newChunk.(*chunkenc.HistogramChunk).SetCounterResetHeader(unwrapped.GetCounterResetHeader()) + } else { + err = fmt.Errorf("internal error, could not unwrap safeHeadChunk to histogram chunk: %T", hc.Chunk) + } + default: + err = fmt.Errorf("internal error, unknown chunk type %T when expecting histogram", p.currChkMeta.Chunk) } + if err != nil { + break + } + var h *histogram.Histogram t, h = p.currDelIter.AtHistogram() p.curr.MinTime = t + // Detect missing gauge reset hint. 
+ if h.CounterResetHint == histogram.GaugeType && newChunk.(*chunkenc.HistogramChunk).GetCounterResetHeader() != chunkenc.GaugeType { + err = fmt.Errorf("found gauge histogram in non gauge chunk") + break + } + app.AppendHistogram(t, h) + for vt := p.currDelIter.Next(); vt != chunkenc.ValNone; vt = p.currDelIter.Next() { if vt != chunkenc.ValHistogram { err = fmt.Errorf("found value type %v in histogram chunk", vt) @@ -801,23 +822,37 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool { t, h = p.currDelIter.AtHistogram() // Defend against corrupted chunks. - pI, nI, okToAppend, counterReset := app.(*chunkenc.HistogramAppender).Appendable(h) - if len(pI)+len(nI) > 0 { - err = fmt.Errorf( - "bucket layout has changed unexpectedly: %d positive and %d negative bucket interjections required", - len(pI), len(nI), - ) - break + if h.CounterResetHint == histogram.GaugeType { + pI, nI, bpI, bnI, _, _, okToAppend := app.(*chunkenc.HistogramAppender).AppendableGauge(h) + if !okToAppend { + err = errors.New("unable to append histogram due to unexpected schema change") + break + } + if len(pI)+len(nI)+len(bpI)+len(bnI) > 0 { + err = fmt.Errorf( + "bucket layout has changed unexpectedly: forward %d positive, %d negative, backward %d positive %d negative bucket interjections required", + len(pI), len(nI), len(bpI), len(bnI), + ) + break + } + } else { + pI, nI, okToAppend, counterReset := app.(*chunkenc.HistogramAppender).Appendable(h) + if len(pI)+len(nI) > 0 { + err = fmt.Errorf( + "bucket layout has changed unexpectedly: %d positive and %d negative bucket interjections required", + len(pI), len(nI), + ) + break + } + if counterReset { + err = errors.New("detected unexpected counter reset in histogram") + break + } + if !okToAppend { + err = errors.New("unable to append histogram due to unexpected schema change") + break + } } - if counterReset { - err = errors.New("detected unexpected counter reset in histogram") - break - } - if !okToAppend { - err = 
errors.New("unable to append histogram due to unexpected schema change") - break - } - app.AppendHistogram(t, h) } case chunkenc.ValFloat: @@ -842,14 +877,35 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool { if app, err = newChunk.Appender(); err != nil { break } - if hc, ok := p.currChkMeta.Chunk.(*chunkenc.FloatHistogramChunk); ok { + + switch hc := p.currChkMeta.Chunk.(type) { + case *chunkenc.FloatHistogramChunk: newChunk.(*chunkenc.FloatHistogramChunk).SetCounterResetHeader(hc.GetCounterResetHeader()) + case *safeHeadChunk: + if unwrapped, ok := hc.Chunk.(*chunkenc.FloatHistogramChunk); ok { + newChunk.(*chunkenc.FloatHistogramChunk).SetCounterResetHeader(unwrapped.GetCounterResetHeader()) + } else { + err = fmt.Errorf("internal error, could not unwrap safeHeadChunk to float histogram chunk: %T", hc.Chunk) + } + default: + err = fmt.Errorf("internal error, unknown chunk type %T when expecting float histogram", p.currChkMeta.Chunk) } + if err != nil { + break + } + var h *histogram.FloatHistogram t, h = p.currDelIter.AtFloatHistogram() p.curr.MinTime = t + // Detect missing gauge reset hint. + if h.CounterResetHint == histogram.GaugeType && newChunk.(*chunkenc.FloatHistogramChunk).GetCounterResetHeader() != chunkenc.GaugeType { + err = fmt.Errorf("found float gauge histogram in non gauge chunk") + break + } + app.AppendFloatHistogram(t, h) + for vt := p.currDelIter.Next(); vt != chunkenc.ValNone; vt = p.currDelIter.Next() { if vt != chunkenc.ValFloatHistogram { err = fmt.Errorf("found value type %v in histogram chunk", vt) @@ -858,21 +914,36 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool { t, h = p.currDelIter.AtFloatHistogram() // Defend against corrupted chunks. 
- pI, nI, okToAppend, counterReset := app.(*chunkenc.FloatHistogramAppender).Appendable(h) - if len(pI)+len(nI) > 0 { - err = fmt.Errorf( - "bucket layout has changed unexpectedly: %d positive and %d negative bucket interjections required", - len(pI), len(nI), - ) - break - } - if counterReset { - err = errors.New("detected unexpected counter reset in histogram") - break - } - if !okToAppend { - err = errors.New("unable to append histogram due to unexpected schema change") - break + if h.CounterResetHint == histogram.GaugeType { + pI, nI, bpI, bnI, _, _, okToAppend := app.(*chunkenc.FloatHistogramAppender).AppendableGauge(h) + if !okToAppend { + err = errors.New("unable to append histogram due to unexpected schema change") + break + } + if len(pI)+len(nI)+len(bpI)+len(bnI) > 0 { + err = fmt.Errorf( + "bucket layout has changed unexpectedly: forward %d positive, %d negative, backward %d positive %d negative bucket interjections required", + len(pI), len(nI), len(bpI), len(bnI), + ) + break + } + } else { + pI, nI, okToAppend, counterReset := app.(*chunkenc.FloatHistogramAppender).Appendable(h) + if len(pI)+len(nI) > 0 { + err = fmt.Errorf( + "bucket layout has changed unexpectedly: %d positive and %d negative bucket interjections required", + len(pI), len(nI), + ) + break + } + if counterReset { + err = errors.New("detected unexpected counter reset in histogram") + break + } + if !okToAppend { + err = errors.New("unable to append histogram due to unexpected schema change") + break + } } app.AppendFloatHistogram(t, h) diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 6802fc250..5bf721a62 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -29,6 +29,7 @@ import ( "github.com/pkg/errors" "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -907,6 +908,202 @@ func 
TestPopulateWithTombSeriesIterators(t *testing.T) { sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, }, }, + { + name: "one histogram chunk", + chks: [][]tsdbutil.Sample{ + { + sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, + sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil}, + sample{3, 0, tsdbutil.GenerateTestHistogram(3), nil}, + sample{6, 0, tsdbutil.GenerateTestHistogram(6), nil}, + }, + }, + expected: []tsdbutil.Sample{ + sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, + sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil}, + sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil}, + sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil}, + }, + expectedChks: []chunks.Meta{ + tsdbutil.ChunkFromSamples([]tsdbutil.Sample{ + sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, + sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil}, + sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil}, + sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil}, + }), + }, + }, + { + name: "one histogram chunk intersect with deletion interval", + chks: [][]tsdbutil.Sample{ + { + sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, + sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil}, + sample{3, 0, tsdbutil.GenerateTestHistogram(3), nil}, + sample{6, 0, tsdbutil.GenerateTestHistogram(6), nil}, + }, + }, + intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}}, + expected: []tsdbutil.Sample{ + sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, + sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil}, + sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil}, + }, + expectedChks: []chunks.Meta{ + tsdbutil.ChunkFromSamples([]tsdbutil.Sample{ + sample{1, 
0, tsdbutil.GenerateTestHistogram(1), nil}, + sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil}, + sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil}, + }), + }, + }, + { + name: "one float histogram chunk", + chks: [][]tsdbutil.Sample{ + { + sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, + sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, + sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)}, + sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, + }, + }, + expected: []tsdbutil.Sample{ + sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, + sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))}, + sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))}, + sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))}, + }, + expectedChks: []chunks.Meta{ + tsdbutil.ChunkFromSamples([]tsdbutil.Sample{ + sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, + sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))}, + sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))}, + sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))}, + }), + }, + }, + { + name: "one float histogram chunk intersect with deletion interval", + chks: [][]tsdbutil.Sample{ + { + sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, + sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, + sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)}, + sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, + }, + }, + intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}}, + expected: []tsdbutil.Sample{ + sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, + sample{2, 0, nil, 
tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))}, + sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))}, + }, + expectedChks: []chunks.Meta{ + tsdbutil.ChunkFromSamples([]tsdbutil.Sample{ + sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, + sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))}, + sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))}, + }), + }, + }, + { + name: "one gauge histogram chunk", + chks: [][]tsdbutil.Sample{ + { + sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil}, + sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil}, + sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil}, + sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil}, + }, + }, + expected: []tsdbutil.Sample{ + sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil}, + sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil}, + sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil}, + sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil}, + }, + expectedChks: []chunks.Meta{ + tsdbutil.ChunkFromSamples([]tsdbutil.Sample{ + sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil}, + sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil}, + sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil}, + sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil}, + }), + }, + }, + { + name: "one gauge histogram chunk intersect with deletion interval", + chks: [][]tsdbutil.Sample{ + { + sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil}, + sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil}, + sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil}, + sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil}, + }, + }, + intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}}, + expected: []tsdbutil.Sample{ + sample{1, 0, 
tsdbutil.GenerateTestGaugeHistogram(1), nil}, + sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil}, + sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil}, + }, + expectedChks: []chunks.Meta{ + tsdbutil.ChunkFromSamples([]tsdbutil.Sample{ + sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil}, + sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil}, + sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil}, + }), + }, + }, + { + name: "one gauge float histogram", + chks: [][]tsdbutil.Sample{ + { + sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)}, + sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)}, + sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)}, + sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)}, + }, + }, + expected: []tsdbutil.Sample{ + sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)}, + sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)}, + sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)}, + sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)}, + }, + expectedChks: []chunks.Meta{ + tsdbutil.ChunkFromSamples([]tsdbutil.Sample{ + sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)}, + sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)}, + sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)}, + sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)}, + }), + }, + }, + { + name: "one gauge float histogram chunk intersect with deletion interval", + chks: [][]tsdbutil.Sample{ + { + sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)}, + sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)}, + sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)}, + sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)}, + }, + }, + intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}}, + expected: []tsdbutil.Sample{ + sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)}, + 
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)}, + sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)}, + }, + expectedChks: []chunks.Meta{ + tsdbutil.ChunkFromSamples([]tsdbutil.Sample{ + sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)}, + sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)}, + sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)}, + }), + }, + }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { @@ -2411,3 +2608,80 @@ func BenchmarkHeadQuerier(b *testing.B) { require.NoError(b, ss.Err()) } } + +// This is a regression test for the case where gauge histograms were not handled by +// populateWithDelChunkSeriesIterator correctly. +func TestQueryWithDeletedHistograms(t *testing.T) { + testcases := map[string]func(int) (*histogram.Histogram, *histogram.FloatHistogram){ + "intCounter": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) { + return tsdbutil.GenerateTestHistogram(i), nil + }, + "intgauge": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) { + return tsdbutil.GenerateTestGaugeHistogram(rand.Int() % 1000), nil + }, + "floatCounter": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) { + return nil, tsdbutil.GenerateTestFloatHistogram(i) + }, + "floatGauge": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) { + return nil, tsdbutil.GenerateTestGaugeFloatHistogram(rand.Int() % 1000) + }, + } + + for name, tc := range testcases { + t.Run(name, func(t *testing.T) { + db := openTestDB(t, nil, nil) + defer func() { + require.NoError(t, db.Close()) + }() + + db.EnableNativeHistograms() + appender := db.Appender(context.Background()) + + var ( + err error + seriesRef storage.SeriesRef + ) + lbs := labels.FromStrings("__name__", "test", "type", name) + + for i := 0; i < 100; i++ { + h, fh := tc(i) + seriesRef, err = appender.AppendHistogram(seriesRef, lbs, int64(i), h, fh) + require.NoError(t, err) + } + + err = appender.Commit() + 
require.NoError(t, err) + + matcher, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test") + require.NoError(t, err) + + // Delete the last 20. + err = db.Delete(80, 100, matcher) + require.NoError(t, err) + + chunkQuerier, err := db.ChunkQuerier(context.Background(), 0, 100) + require.NoError(t, err) + + css := chunkQuerier.Select(false, nil, matcher) + + seriesCount := 0 + for css.Next() { + seriesCount++ + series := css.At() + + sampleCount := 0 + it := series.Iterator(nil) + for it.Next() { + chk := it.At() + for cit := chk.Chunk.Iterator(nil); cit.Next() != chunkenc.ValNone; { + sampleCount++ + } + } + require.NoError(t, it.Err()) + require.Equal(t, 80, sampleCount) + } + require.NoError(t, css.Err()) + require.Equal(t, 1, seriesCount) + }) + } +} diff --git a/tsdb/tsdbutil/chunks.go b/tsdb/tsdbutil/chunks.go index 02a7dd619..6e57016fe 100644 --- a/tsdb/tsdbutil/chunks.go +++ b/tsdb/tsdbutil/chunks.go @@ -71,9 +71,19 @@ func ChunkFromSamplesGeneric(s Samples) chunks.Meta { case chunkenc.ValFloat: ca.Append(s.Get(i).T(), s.Get(i).F()) case chunkenc.ValHistogram: - ca.AppendHistogram(s.Get(i).T(), s.Get(i).H()) + h := s.Get(i).H() + ca.AppendHistogram(s.Get(i).T(), h) + if i == 0 && h.CounterResetHint == histogram.GaugeType { + hc := c.(*chunkenc.HistogramChunk) + hc.SetCounterResetHeader(chunkenc.GaugeType) + } case chunkenc.ValFloatHistogram: - ca.AppendFloatHistogram(s.Get(i).T(), s.Get(i).FH()) + fh := s.Get(i).FH() + ca.AppendFloatHistogram(s.Get(i).T(), fh) + if i == 0 && fh.CounterResetHint == histogram.GaugeType { + hc := c.(*chunkenc.FloatHistogramChunk) + hc.SetCounterResetHeader(chunkenc.GaugeType) + } default: panic(fmt.Sprintf("unknown sample type %s", sampleType.String())) } diff --git a/tsdb/tsdbutil/histogram.go b/tsdb/tsdbutil/histogram.go index 3c276c841..2145034e1 100644 --- a/tsdb/tsdbutil/histogram.go +++ b/tsdb/tsdbutil/histogram.go @@ -108,3 +108,13 @@ func GenerateTestGaugeFloatHistogram(i int) *histogram.FloatHistogram { 
h.CounterResetHint = histogram.GaugeType return h } + +func SetHistogramNotCounterReset(h *histogram.Histogram) *histogram.Histogram { + h.CounterResetHint = histogram.NotCounterReset + return h +} + +func SetFloatHistogramNotCounterReset(h *histogram.FloatHistogram) *histogram.FloatHistogram { + h.CounterResetHint = histogram.NotCounterReset + return h +} From c8e7f95a3cbfbb26eb48e86155ca1f4cfe20dc8f Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Sun, 21 May 2023 09:20:07 +0200 Subject: [PATCH 151/632] ci(lint): enable predeclared linter Signed-off-by: Matthieu MOREL --- .golangci.yml | 3 ++- tsdb/exemplar.go | 6 +++--- tsdb/isolation.go | 4 ++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index d1cd86ed5..fc2721455 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -13,8 +13,9 @@ linters: - gocritic - gofumpt - goimports - - revive - misspell + - predeclared + - revive - unconvert - unused diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index ad3b2ef39..01718bb57 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -216,9 +216,9 @@ func (ce *CircularExemplarStorage) ValidateExemplar(l labels.Labels, e exemplar. return ce.validateExemplar(seriesLabels, e, false) } -// Not thread safe. The append parameters tells us whether this is an external validation, or internal +// Not thread safe. The appended parameters tells us whether this is an external validation, or internal // as a result of an AddExemplar call, in which case we should update any relevant metrics. 
-func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemplar, append bool) error { +func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemplar, appended bool) error { if len(ce.exemplars) == 0 { return storage.ErrExemplarsDisabled } @@ -250,7 +250,7 @@ func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemp } if e.Ts <= ce.exemplars[idx.newest].exemplar.Ts { - if append { + if appended { ce.metrics.outOfOrderExemplars.Inc() } return storage.ErrOutOfOrderExemplar diff --git a/tsdb/isolation.go b/tsdb/isolation.go index 401e5885a..e436884a8 100644 --- a/tsdb/isolation.go +++ b/tsdb/isolation.go @@ -244,9 +244,9 @@ type txRing struct { txIDCount int // How many ids in the ring. } -func newTxRing(cap int) *txRing { +func newTxRing(capacity int) *txRing { return &txRing{ - txIDs: make([]uint64, cap), + txIDs: make([]uint64, capacity), } } From 8c5d4b4add2c528701cced254d1272824b9ec3ba Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Sun, 21 May 2023 01:41:30 -0700 Subject: [PATCH 152/632] Opmize MatchNotEqual (#12377) Signed-off-by: Alan Protasio --- tsdb/querier.go | 24 ++++++++++++++++-------- tsdb/querier_bench_test.go | 1 + 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/tsdb/querier.go b/tsdb/querier.go index 8bcd451f3..9baf3f242 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -361,6 +361,22 @@ func postingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Postings, erro // inversePostingsForMatcher returns the postings for the series with the label name set but not matching the matcher. func inversePostingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Postings, error) { + // Fast-path for MatchNotRegexp matching. + // Inverse of a MatchNotRegexp is MatchRegexp (double negation). + // Fast-path for set matching. 
+ if m.Type == labels.MatchNotRegexp { + setMatches := findSetMatches(m.GetRegexString()) + if len(setMatches) > 0 { + return ix.Postings(m.Name, setMatches...) + } + } + + // Fast-path for MatchNotEqual matching. + // Inverse of a MatchNotEqual is MatchEqual (double negation). + if m.Type == labels.MatchNotEqual { + return ix.Postings(m.Name, m.Value) + } + vals, err := ix.LabelValues(m.Name) if err != nil { return nil, err @@ -371,14 +387,6 @@ func inversePostingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Posting if m.Type == labels.MatchEqual && m.Value == "" { res = vals } else { - // Inverse of a MatchNotRegexp is MatchRegexp (double negation). - // Fast-path for set matching. - if m.Type == labels.MatchNotRegexp { - setMatches := findSetMatches(m.GetRegexString()) - if len(setMatches) > 0 { - return ix.Postings(m.Name, setMatches...) - } - } for _, val := range vals { if !m.Matches(val) { res = append(res, val) diff --git a/tsdb/querier_bench_test.go b/tsdb/querier_bench_test.go index 6e5243ef1..19619e35b 100644 --- a/tsdb/querier_bench_test.go +++ b/tsdb/querier_bench_test.go @@ -125,6 +125,7 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) { {`n="X",j="foo"`, []*labels.Matcher{nX, jFoo}}, {`j="foo",n="1"`, []*labels.Matcher{jFoo, n1}}, {`n="1",j!="foo"`, []*labels.Matcher{n1, jNotFoo}}, + {`n="1",i!="2"`, []*labels.Matcher{n1, iNot2}}, {`n="X",j!="foo"`, []*labels.Matcher{nX, jNotFoo}}, {`i=~"1[0-9]",j=~"foo|bar"`, []*labels.Matcher{iCharSet, jFooBar}}, {`j=~"foo|bar"`, []*labels.Matcher{jFooBar}}, From 905a0bd63a12fa328eac4b7f2674356c283bacac Mon Sep 17 00:00:00 2001 From: Baskar Shanmugam <123750249+codebasky@users.noreply.github.com> Date: Mon, 22 May 2023 18:07:07 +0530 Subject: [PATCH 153/632] Added 'limit' query parameter support to /api/v1/status/tsdb endpoint (#12336) * Added 'topN' query parameter support to /api/v1/status/tsdb endpoint Signed-off-by: Baskar Shanmugam * Updated query parameter for tsdb status to 'limit' 
Signed-off-by: Baskar Shanmugam * Corrected Stats() parameter name from topN to limit Signed-off-by: Baskar Shanmugam * Fixed p.Stats CI failure Signed-off-by: Baskar Shanmugam --------- Signed-off-by: Baskar Shanmugam --- cmd/prometheus/main.go | 4 ++-- docs/querying/api.md | 4 ++++ tsdb/head.go | 8 ++++---- tsdb/index/postings.go | 12 +++++------- tsdb/index/postings_test.go | 4 ++-- web/api/v1/api.go | 13 ++++++++++--- web/api/v1/api_test.go | 19 +++++++++++++++---- web/federate_test.go | 2 +- web/web_test.go | 4 ++-- 9 files changed, 45 insertions(+), 25 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 778b131c8..e05ac7957 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -1486,11 +1486,11 @@ func (s *readyStorage) Snapshot(dir string, withHead bool) error { } // Stats implements the api_v1.TSDBAdminStats interface. -func (s *readyStorage) Stats(statsByLabelName string) (*tsdb.Stats, error) { +func (s *readyStorage) Stats(statsByLabelName string, limit int) (*tsdb.Stats, error) { if x := s.get(); x != nil { switch db := x.(type) { case *tsdb.DB: - return db.Head().Stats(statsByLabelName), nil + return db.Head().Stats(statsByLabelName, limit), nil case *agent.DB: return nil, agent.ErrUnsupported default: diff --git a/docs/querying/api.md b/docs/querying/api.md index ef7fa54c6..edce366ee 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -1074,6 +1074,10 @@ The following endpoint returns various cardinality statistics about the Promethe ``` GET /api/v1/status/tsdb ``` +URL query parameters: +- `limit=`: Limit the number of returned items to a given number for each set of statistics. By default, 10 items are returned. + +The `data` section of the query result consists of - **headStats**: This provides the following data about the head block of the TSDB: - **numSeries**: The number of series. - **chunkCount**: The number of chunks. 
diff --git a/tsdb/head.go b/tsdb/head.go index aa7a3c125..f094b3662 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -978,7 +978,7 @@ func (h *Head) DisableNativeHistograms() { } // PostingsCardinalityStats returns top 10 highest cardinality stats By label and value names. -func (h *Head) PostingsCardinalityStats(statsByLabelName string) *index.PostingsStats { +func (h *Head) PostingsCardinalityStats(statsByLabelName string, limit int) *index.PostingsStats { h.cardinalityMutex.Lock() defer h.cardinalityMutex.Unlock() currentTime := time.Duration(time.Now().Unix()) * time.Second @@ -989,7 +989,7 @@ func (h *Head) PostingsCardinalityStats(statsByLabelName string) *index.Postings if h.cardinalityCache != nil { return h.cardinalityCache } - h.cardinalityCache = h.postings.Stats(statsByLabelName) + h.cardinalityCache = h.postings.Stats(statsByLabelName, limit) h.lastPostingsStatsCall = time.Duration(time.Now().Unix()) * time.Second return h.cardinalityCache @@ -1329,12 +1329,12 @@ type Stats struct { // Stats returns important current HEAD statistics. Note that it is expensive to // calculate these. -func (h *Head) Stats(statsByLabelName string) *Stats { +func (h *Head) Stats(statsByLabelName string, limit int) *Stats { return &Stats{ NumSeries: h.NumSeries(), MaxTime: h.MaxTime(), MinTime: h.MinTime(), - IndexPostingStats: h.PostingsCardinalityStats(statsByLabelName), + IndexPostingStats: h.PostingsCardinalityStats(statsByLabelName, limit), } } diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 200100239..2ac6edbdc 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -156,10 +156,8 @@ type PostingsStats struct { } // Stats calculates the cardinality statistics from postings. 
-func (p *MemPostings) Stats(label string) *PostingsStats { - const maxNumOfRecords = 10 +func (p *MemPostings) Stats(label string, limit int) *PostingsStats { var size uint64 - p.mtx.RLock() metrics := &maxHeap{} @@ -168,10 +166,10 @@ func (p *MemPostings) Stats(label string) *PostingsStats { labelValuePairs := &maxHeap{} numLabelPairs := 0 - metrics.init(maxNumOfRecords) - labels.init(maxNumOfRecords) - labelValueLength.init(maxNumOfRecords) - labelValuePairs.init(maxNumOfRecords) + metrics.init(limit) + labels.init(limit) + labelValueLength.init(limit) + labelValuePairs.init(limit) for n, e := range p.m { if n == "" { diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index 9d8b5ebf3..9454def46 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -912,7 +912,7 @@ func BenchmarkPostings_Stats(b *testing.B) { } b.ResetTimer() for n := 0; n < b.N; n++ { - p.Stats("__name__") + p.Stats("__name__", 10) } } @@ -927,7 +927,7 @@ func TestMemPostingsStats(t *testing.T) { p.Add(2, labels.FromStrings("label", "value1")) // call the Stats method to calculate the cardinality statistics - stats := p.Stats("label") + stats := p.Stats("label", 10) // assert that the expected statistics were calculated require.Equal(t, uint64(2), stats.CardinalityMetricsStats[0].Count) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index e1168e1a6..f7249efb0 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -171,7 +171,7 @@ type TSDBAdminStats interface { CleanTombstones() error Delete(mint, maxt int64, ms ...*labels.Matcher) error Snapshot(dir string, withHead bool) error - Stats(statsByLabelName string) (*tsdb.Stats, error) + Stats(statsByLabelName string, limit int) (*tsdb.Stats, error) WALReplayStatus() (tsdb.WALReplayStatus, error) } @@ -1472,8 +1472,15 @@ func TSDBStatsFromIndexStats(stats []index.Stat) []TSDBStat { return result } -func (api *API) serveTSDBStatus(*http.Request) apiFuncResult { - s, err := 
api.db.Stats(labels.MetricName) +func (api *API) serveTSDBStatus(r *http.Request) apiFuncResult { + limit := 10 + if s := r.FormValue("limit"); s != "" { + var err error + if limit, err = strconv.Atoi(s); err != nil || limit < 1 { + return apiFuncResult{nil, &apiError{errorBadData, errors.New("limit must be a positive number")}, nil, nil} + } + } + s, err := api.db.Stats(labels.MetricName, limit) if err != nil { return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil} } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 620acd862..baee2189e 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -2622,7 +2622,7 @@ type fakeDB struct { func (f *fakeDB) CleanTombstones() error { return f.err } func (f *fakeDB) Delete(int64, int64, ...*labels.Matcher) error { return f.err } func (f *fakeDB) Snapshot(string, bool) error { return f.err } -func (f *fakeDB) Stats(statsByLabelName string) (_ *tsdb.Stats, retErr error) { +func (f *fakeDB) Stats(statsByLabelName string, limit int) (_ *tsdb.Stats, retErr error) { dbDir, err := os.MkdirTemp("", "tsdb-api-ready") if err != nil { return nil, err @@ -2636,7 +2636,7 @@ func (f *fakeDB) Stats(statsByLabelName string) (_ *tsdb.Stats, retErr error) { opts := tsdb.DefaultHeadOptions() opts.ChunkRange = 1000 h, _ := tsdb.NewHead(nil, nil, nil, nil, opts, nil) - return h.Stats(statsByLabelName), nil + return h.Stats(statsByLabelName, limit), nil } func (f *fakeDB) WALReplayStatus() (tsdb.WALReplayStatus, error) { @@ -3283,8 +3283,19 @@ func TestTSDBStatus(t *testing.T) { { db: tsdb, endpoint: tsdbStatusAPI, - - errType: errorNone, + errType: errorNone, + }, + { + db: tsdb, + endpoint: tsdbStatusAPI, + values: map[string][]string{"limit": {"20"}}, + errType: errorNone, + }, + { + db: tsdb, + endpoint: tsdbStatusAPI, + values: map[string][]string{"limit": {"0"}}, + errType: errorBadData, }, } { tc := tc diff --git a/web/federate_test.go b/web/federate_test.go index bf7b6fefe..6bc0ae212 100644 --- 
a/web/federate_test.go +++ b/web/federate_test.go @@ -251,7 +251,7 @@ func (notReadyReadStorage) StartTime() (int64, error) { return 0, errors.Wrap(tsdb.ErrNotReady, "wrap") } -func (notReadyReadStorage) Stats(string) (*tsdb.Stats, error) { +func (notReadyReadStorage) Stats(string, int) (*tsdb.Stats, error) { return nil, errors.Wrap(tsdb.ErrNotReady, "wrap") } diff --git a/web/web_test.go b/web/web_test.go index 7da06a306..8832c2839 100644 --- a/web/web_test.go +++ b/web/web_test.go @@ -52,8 +52,8 @@ type dbAdapter struct { *tsdb.DB } -func (a *dbAdapter) Stats(statsByLabelName string) (*tsdb.Stats, error) { - return a.Head().Stats(statsByLabelName), nil +func (a *dbAdapter) Stats(statsByLabelName string, limit int) (*tsdb.Stats, error) { + return a.Head().Stats(statsByLabelName, limit), nil } func (a *dbAdapter) WALReplayStatus() (tsdb.WALReplayStatus, error) { From 3524a16aa03d7b91a6d0484aa4ed6f1d443837f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Tue, 23 May 2023 10:29:17 +0200 Subject: [PATCH 154/632] feat: add suggested changes, tests, and stdin support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- cmd/promtool/main.go | 11 +-- cmd/promtool/metrics.go | 157 +++++++--------------------------- docs/command-line/promtool.md | 4 +- util/fmtutil/format.go | 142 ++++++++++++++++++++++++++++++ util/fmtutil/format_test.go | 71 +++++++++++++++ 5 files changed, 251 insertions(+), 134 deletions(-) create mode 100644 util/fmtutil/format.go create mode 100644 util/fmtutil/format_test.go diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index c4077954e..3b5ba78e4 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -81,6 +81,7 @@ func main() { var ( httpRoundTripper = api.DefaultRoundTripper serverURL *url.URL + remoteWriteURL *url.URL httpConfigFilePath string ) @@ -180,12 +181,12 @@ func main() { pushCmd := app.Command("push", "Push to a 
Prometheus server.") pushCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath) - pushMetricsCmd := pushCmd.Command("metrics", "Push metrics to a prometheus remote write.") - pushMetricsCmd.Arg("remote-write-url", "Prometheus remote write url to push metrics.").Required().URLVar(&serverURL) + pushMetricsCmd := pushCmd.Command("metrics", "Push metrics to a prometheus remote write (for testing purpose only).") + pushMetricsCmd.Arg("remote-write-url", "Prometheus remote write url to push metrics.").Required().URLVar(&remoteWriteURL) metricFiles := pushMetricsCmd.Arg( "metric-files", - "The metric files to push.", - ).Required().ExistingFiles() + "The metric files to push, default is read from standard input (STDIN).", + ).ExistingFiles() metricJobLabel := pushMetricsCmd.Flag("job-label", "Job label to attach to metrics.").Default("promtool").String() pushMetricsTimeout := pushMetricsCmd.Flag("timeout", "The time to wait for pushing metrics.").Default("30s").Duration() pushMetricsHeaders := pushMetricsCmd.Flag("header", "Prometheus remote write header.").StringMap() @@ -314,7 +315,7 @@ func main() { os.Exit(CheckMetrics(*checkMetricsExtended)) case pushMetricsCmd.FullCommand(): - os.Exit(PushMetrics(serverURL, httpRoundTripper, *pushMetricsHeaders, *pushMetricsTimeout, *metricJobLabel, *metricFiles...)) + os.Exit(PushMetrics(remoteWriteURL, httpRoundTripper, *pushMetricsHeaders, *pushMetricsTimeout, *metricJobLabel, *metricFiles...)) case queryInstantCmd.FullCommand(): os.Exit(QueryInstant(serverURL, httpRoundTripper, *queryInstantExpr, *queryInstantTime, p)) diff --git a/cmd/promtool/metrics.go b/cmd/promtool/metrics.go index 21fcd3e66..c845b5a58 100644 --- a/cmd/promtool/metrics.go +++ b/cmd/promtool/metrics.go @@ -21,22 +21,18 @@ import ( "net/http" "net/url" "os" - "sort" "time" "github.com/golang/snappy" - dto "github.com/prometheus/client_model/go" config_util 
"github.com/prometheus/common/config" - "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage/remote" + "github.com/prometheus/prometheus/util/fmtutil" ) -// Push metrics to a prometheus remote write. +// Push metrics to a prometheus remote write (for testing purpose only). func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, jobLabel string, files ...string) int { - // remote write should respect specification: https://prometheus.io/docs/concepts/remote_write_spec/ failed := false addressURL, err := url.Parse(url.String()) @@ -67,18 +63,36 @@ func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[strin headers: headers, } - for _, f := range files { + // add empty string to avoid matching filename + if len(files) == 0 { + files = append(files, "") + } + + for _, file := range files { var data []byte var err error - data, err = os.ReadFile(f) - if err != nil { - fmt.Fprintln(os.Stderr, err) - failed = true - continue - } - fmt.Printf("Parsing metric file %s\n", f) - metricsData, err := parseMetricsTextAndFormat(bytes.NewReader(data), jobLabel) + // if file is an empty string it is a stdin + if file == "" { + data, err = io.ReadAll(os.Stdin) + if err != nil { + fmt.Fprintln(os.Stderr, err) + failed = true + break + } + + fmt.Printf("Parsing stdin\n") + } else { + data, err = os.ReadFile(file) + if err != nil { + fmt.Fprintln(os.Stderr, err) + failed = true + continue + } + + fmt.Printf("Parsing metric file %s\n", file) + } + metricsData, err := fmtutil.ParseMetricsTextAndFormat(bytes.NewReader(data), jobLabel) if err != nil { fmt.Fprintln(os.Stderr, err) failed = true @@ -100,7 +114,7 @@ func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[strin failed = true continue } - fmt.Printf("Successfully pushed metric file %s\n", f) + fmt.Printf("Successfully pushed 
metric file %s\n", file) } if failed { @@ -121,114 +135,3 @@ func (s *setHeadersTransport) RoundTrip(req *http.Request) (*http.Response, erro } return s.RoundTripper.RoundTrip(req) } - -var MetricMetadataTypeValue = map[string]int32{ - "UNKNOWN": 0, - "COUNTER": 1, - "GAUGE": 2, - "HISTOGRAM": 3, - "GAUGEHISTOGRAM": 4, - "SUMMARY": 5, - "INFO": 6, - "STATESET": 7, -} - -// formatMetrics convert metric family to a writerequest -func formatMetrics(mf map[string]*dto.MetricFamily, jobLabel string) (*prompb.WriteRequest, error) { - wr := &prompb.WriteRequest{} - - // build metric list - sortedMetricNames := make([]string, 0, len(mf)) - for metric := range mf { - sortedMetricNames = append(sortedMetricNames, metric) - } - // sort metrics name in lexicographical order - sort.Strings(sortedMetricNames) - - for _, metricName := range sortedMetricNames { - // Set metadata writerequest - mtype := MetricMetadataTypeValue[mf[metricName].Type.String()] - metadata := prompb.MetricMetadata{ - MetricFamilyName: mf[metricName].GetName(), - Type: prompb.MetricMetadata_MetricType(mtype), - Help: mf[metricName].GetHelp(), - } - wr.Metadata = append(wr.Metadata, metadata) - - for _, metric := range mf[metricName].Metric { - var timeserie prompb.TimeSeries - - // build labels map - labels := make(map[string]string, len(metric.Label)+2) - labels[model.MetricNameLabel] = metricName - labels[model.JobLabel] = jobLabel - - for _, label := range metric.Label { - labelname := label.GetName() - if labelname == model.JobLabel { - labelname = fmt.Sprintf("%s%s", model.ExportedLabelPrefix, labelname) - } - labels[labelname] = label.GetValue() - } - - // build labels name list - sortedLabelNames := make([]string, 0, len(labels)) - for label := range labels { - sortedLabelNames = append(sortedLabelNames, label) - } - // sort labels name in lexicographical order - sort.Strings(sortedLabelNames) - - for _, label := range sortedLabelNames { - timeserie.Labels = append(timeserie.Labels, prompb.Label{ - 
Name: label, - Value: labels[label], - }) - } - - timeserie.Samples = []prompb.Sample{ - { - Timestamp: time.Now().UnixNano() / int64(time.Millisecond), - Value: getMetricsValue(metric), - }, - } - - wr.Timeseries = append(wr.Timeseries, timeserie) - } - } - return wr, nil -} - -// parseMetricsTextReader consumes an io.Reader and returns the MetricFamily -func parseMetricsTextReader(input io.Reader) (map[string]*dto.MetricFamily, error) { - var parser expfmt.TextParser - mf, err := parser.TextToMetricFamilies(input) - if err != nil { - return nil, err - } - return mf, nil -} - -// getMetricsValue return the value of a timeserie without the need to give value type -func getMetricsValue(m *dto.Metric) float64 { - switch { - case m.Gauge != nil: - return m.GetGauge().GetValue() - case m.Counter != nil: - return m.GetCounter().GetValue() - case m.Untyped != nil: - return m.GetUntyped().GetValue() - default: - return 0. - } -} - -// parseMetricsTextAndFormat return the data in the expected prometheus metrics write request format -func parseMetricsTextAndFormat(input io.Reader, jobLabel string) (*prompb.WriteRequest, error) { - mf, err := parseMetricsTextReader(input) - if err != nil { - return nil, err - } - - return formatMetrics(mf, jobLabel) -} diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index ac159a921..024c71e51 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -390,7 +390,7 @@ Push to a Prometheus server. ##### `promtool push metrics` -Push metrics to a prometheus remote write. +Push metrics to a prometheus remote write (for testing purpose only). @@ -410,7 +410,7 @@ Push metrics to a prometheus remote write. | Argument | Description | Required | | --- | --- | --- | | remote-write-url | Prometheus remote write url to push metrics. | Yes | -| metric-files | The metric files to push. | Yes | +| metric-files | The metric files to push, default is read from standard input (STDIN). 
| | diff --git a/util/fmtutil/format.go b/util/fmtutil/format.go new file mode 100644 index 000000000..9a06d6bb1 --- /dev/null +++ b/util/fmtutil/format.go @@ -0,0 +1,142 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fmtutil + +import ( + "fmt" + "io" + "sort" + "time" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/prompb" +) + +var MetricMetadataTypeValue = map[string]int32{ + "UNKNOWN": 0, + "COUNTER": 1, + "GAUGE": 2, + "HISTOGRAM": 3, + "GAUGEHISTOGRAM": 4, + "SUMMARY": 5, + "INFO": 6, + "STATESET": 7, +} + +// FormatMetrics convert metric family to a writerequest. 
+func FormatMetrics(mf map[string]*dto.MetricFamily, jobLabel string) (*prompb.WriteRequest, error) { + wr := &prompb.WriteRequest{} + + // build metric list + sortedMetricNames := make([]string, 0, len(mf)) + for metric := range mf { + sortedMetricNames = append(sortedMetricNames, metric) + } + // sort metrics name in lexicographical order + sort.Strings(sortedMetricNames) + + for _, metricName := range sortedMetricNames { + // Set metadata writerequest + mtype := MetricMetadataTypeValue[mf[metricName].Type.String()] + metadata := prompb.MetricMetadata{ + MetricFamilyName: mf[metricName].GetName(), + Type: prompb.MetricMetadata_MetricType(mtype), + Help: mf[metricName].GetHelp(), + } + wr.Metadata = append(wr.Metadata, metadata) + + for _, metric := range mf[metricName].Metric { + var timeserie prompb.TimeSeries + + // build labels map + labels := make(map[string]string, len(metric.Label)+2) + labels[model.MetricNameLabel] = metricName + labels[model.JobLabel] = jobLabel + + for _, label := range metric.Label { + labelname := label.GetName() + if labelname == model.JobLabel { + labelname = fmt.Sprintf("%s%s", model.ExportedLabelPrefix, labelname) + } + labels[labelname] = label.GetValue() + } + + // build labels name list + sortedLabelNames := make([]string, 0, len(labels)) + for label := range labels { + sortedLabelNames = append(sortedLabelNames, label) + } + // sort labels name in lexicographical order + sort.Strings(sortedLabelNames) + + for _, label := range sortedLabelNames { + timeserie.Labels = append(timeserie.Labels, prompb.Label{ + Name: label, + Value: labels[label], + }) + } + + timestamp := metric.GetTimestampMs() + if timestamp == 0 { + timestamp = time.Now().UnixNano() / int64(time.Millisecond) + } + + timeserie.Samples = []prompb.Sample{ + { + Timestamp: timestamp, + Value: getMetricsValue(metric), + }, + } + + wr.Timeseries = append(wr.Timeseries, timeserie) + } + } + return wr, nil +} + +// getMetricsValue return the value of a timeserie without 
the need to give value type +func getMetricsValue(m *dto.Metric) float64 { + switch { + case m.Gauge != nil: + return m.GetGauge().GetValue() + case m.Counter != nil: + return m.GetCounter().GetValue() + case m.Untyped != nil: + return m.GetUntyped().GetValue() + default: + return 0. + } +} + +// ParseMetricsTextReader consumes an io.Reader and returns the MetricFamily. +func ParseMetricsTextReader(input io.Reader) (map[string]*dto.MetricFamily, error) { + var parser expfmt.TextParser + mf, err := parser.TextToMetricFamilies(input) + if err != nil { + return nil, err + } + return mf, nil +} + +// ParseMetricsTextAndFormat return the data in the expected prometheus metrics write request format. +func ParseMetricsTextAndFormat(input io.Reader, jobLabel string) (*prompb.WriteRequest, error) { + mf, err := ParseMetricsTextReader(input) + if err != nil { + return nil, err + } + return FormatMetrics(mf, jobLabel) +} diff --git a/util/fmtutil/format_test.go b/util/fmtutil/format_test.go new file mode 100644 index 000000000..ef3b7fcd4 --- /dev/null +++ b/util/fmtutil/format_test.go @@ -0,0 +1,71 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fmtutil + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/prompb" +) + +var writeRequestFixture = &prompb.WriteRequest{ + Metadata: []prompb.MetricMetadata{ + { + MetricFamilyName: "test_metric1", + Type: 2, + Help: "this is a test metric", + }, + }, + Timeseries: []prompb.TimeSeries{ + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "test_metric1"}, + {Name: "b", Value: "c"}, + {Name: "baz", Value: "qux"}, + {Name: "d", Value: "e"}, + {Name: "foo", Value: "bar"}, + {Name: "job", Value: "promtool"}, + }, + Samples: []prompb.Sample{{Value: 1, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "test_metric1"}, + {Name: "b", Value: "c"}, + {Name: "baz", Value: "qux"}, + {Name: "d", Value: "e"}, + {Name: "foo", Value: "bar"}, + {Name: "job", Value: "promtool"}, + }, + Samples: []prompb.Sample{{Value: 2, Timestamp: 1}}, + }, + }, +} + +func TestParseMetricsTextAndFormat(t *testing.T) { + input := bytes.NewReader([]byte(` + # HELP test_metric1 this is a test metric + # TYPE test_metric1 gauge + test_metric1{b="c",baz="qux",d="e",foo="bar"} 1 1 + test_metric1{b="c",baz="qux",d="e",foo="bar"} 2 1 + `)) + + expected, err := ParseMetricsTextAndFormat(input, "promtool") + require.NoError(t, err) + + require.Equal(t, writeRequestFixture, expected) +} From 934c5ddb8d223c6363033a48919e014c63e15ccb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Wed, 24 May 2023 10:55:49 +0200 Subject: [PATCH 155/632] feat: make push metrics labels generic and repeatable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- cmd/promtool/main.go | 4 ++-- cmd/promtool/metrics.go | 25 +++++++++++++++---------- docs/command-line/promtool.md | 2 +- util/fmtutil/format.go | 15 ++++++++++----- util/fmtutil/format_test.go | 3 ++- 5 files changed, 30 insertions(+), 19 deletions(-) diff --git 
a/cmd/promtool/main.go b/cmd/promtool/main.go index 3b5ba78e4..c76790e13 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -187,7 +187,7 @@ func main() { "metric-files", "The metric files to push, default is read from standard input (STDIN).", ).ExistingFiles() - metricJobLabel := pushMetricsCmd.Flag("job-label", "Job label to attach to metrics.").Default("promtool").String() + pushMetricsLabels := pushMetricsCmd.Flag("label", "Label to attach to metrics. Can be specified multiple times.").Default("job=promtool").StringMap() pushMetricsTimeout := pushMetricsCmd.Flag("timeout", "The time to wait for pushing metrics.").Default("30s").Duration() pushMetricsHeaders := pushMetricsCmd.Flag("header", "Prometheus remote write header.").StringMap() @@ -315,7 +315,7 @@ func main() { os.Exit(CheckMetrics(*checkMetricsExtended)) case pushMetricsCmd.FullCommand(): - os.Exit(PushMetrics(remoteWriteURL, httpRoundTripper, *pushMetricsHeaders, *pushMetricsTimeout, *metricJobLabel, *metricFiles...)) + os.Exit(PushMetrics(remoteWriteURL, httpRoundTripper, *pushMetricsHeaders, *pushMetricsTimeout, *pushMetricsLabels, *metricFiles...)) case queryInstantCmd.FullCommand(): os.Exit(QueryInstant(serverURL, httpRoundTripper, *queryInstantExpr, *queryInstantTime, p)) diff --git a/cmd/promtool/metrics.go b/cmd/promtool/metrics.go index c845b5a58..8abe32cf4 100644 --- a/cmd/promtool/metrics.go +++ b/cmd/promtool/metrics.go @@ -32,7 +32,7 @@ import ( ) // Push metrics to a prometheus remote write (for testing purpose only). 
-func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, jobLabel string, files ...string) int { +func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, labels map[string]string, files ...string) int { failed := false addressURL, err := url.Parse(url.String()) @@ -76,32 +76,32 @@ func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[strin if file == "" { data, err = io.ReadAll(os.Stdin) if err != nil { - fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, " FAILED:", err) failed = true break } - fmt.Printf("Parsing stdin\n") + fmt.Printf("Parsing input from stdin\n") } else { data, err = os.ReadFile(file) if err != nil { - fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, " FAILED:", err) failed = true continue } - fmt.Printf("Parsing metric file %s\n", file) + fmt.Printf("Parsing input from metric file %s\n", file) } - metricsData, err := fmtutil.ParseMetricsTextAndFormat(bytes.NewReader(data), jobLabel) + metricsData, err := fmtutil.ParseMetricsTextAndFormat(bytes.NewReader(data), labels) if err != nil { - fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, " FAILED:", err) failed = true continue } raw, err := metricsData.Marshal() if err != nil { - fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, " FAILED:", err) failed = true continue } @@ -110,11 +110,16 @@ func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[strin compressed := snappy.Encode(nil, raw) err = client.Store(context.Background(), compressed) if err != nil { - fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, " FAILED:", err) failed = true continue } - fmt.Printf("Successfully pushed metric file %s\n", file) + + if file == "" { + fmt.Printf(" SUCCESS: metric pushed to remote write.\n") + } else { + fmt.Printf(" SUCCESS: metric file %s pushed to remote write.\n", file) + } } if failed { diff --git 
a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 024c71e51..c78900b99 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -398,7 +398,7 @@ Push metrics to a prometheus remote write (for testing purpose only). | Flag | Description | Default | | --- | --- | --- | -| --job-label | Job label to attach to metrics. | `promtool` | +| --label | Label to attach to metrics. Can be specified multiple times. | `job=promtool` | | --timeout | The time to wait for pushing metrics. | `30s` | | --header | Prometheus remote write header. | | diff --git a/util/fmtutil/format.go b/util/fmtutil/format.go index 9a06d6bb1..b5bb9469c 100644 --- a/util/fmtutil/format.go +++ b/util/fmtutil/format.go @@ -38,7 +38,7 @@ var MetricMetadataTypeValue = map[string]int32{ } // FormatMetrics convert metric family to a writerequest. -func FormatMetrics(mf map[string]*dto.MetricFamily, jobLabel string) (*prompb.WriteRequest, error) { +func FormatMetrics(mf map[string]*dto.MetricFamily, extraLabels map[string]string) (*prompb.WriteRequest, error) { wr := &prompb.WriteRequest{} // build metric list @@ -63,10 +63,15 @@ func FormatMetrics(mf map[string]*dto.MetricFamily, jobLabel string) (*prompb.Wr var timeserie prompb.TimeSeries // build labels map - labels := make(map[string]string, len(metric.Label)+2) + labels := make(map[string]string, len(metric.Label)+len(extraLabels)) labels[model.MetricNameLabel] = metricName - labels[model.JobLabel] = jobLabel + // add extra labels + for key, value := range extraLabels { + labels[key] = value + } + + // add metric labels for _, label := range metric.Label { labelname := label.GetName() if labelname == model.JobLabel { @@ -133,10 +138,10 @@ func ParseMetricsTextReader(input io.Reader) (map[string]*dto.MetricFamily, erro } // ParseMetricsTextAndFormat return the data in the expected prometheus metrics write request format. 
-func ParseMetricsTextAndFormat(input io.Reader, jobLabel string) (*prompb.WriteRequest, error) { +func ParseMetricsTextAndFormat(input io.Reader, labels map[string]string) (*prompb.WriteRequest, error) { mf, err := ParseMetricsTextReader(input) if err != nil { return nil, err } - return FormatMetrics(mf, jobLabel) + return FormatMetrics(mf, labels) } diff --git a/util/fmtutil/format_test.go b/util/fmtutil/format_test.go index ef3b7fcd4..9deed2de9 100644 --- a/util/fmtutil/format_test.go +++ b/util/fmtutil/format_test.go @@ -63,8 +63,9 @@ func TestParseMetricsTextAndFormat(t *testing.T) { test_metric1{b="c",baz="qux",d="e",foo="bar"} 1 1 test_metric1{b="c",baz="qux",d="e",foo="bar"} 2 1 `)) + labels := map[string]string{"job": "promtool"} - expected, err := ParseMetricsTextAndFormat(input, "promtool") + expected, err := ParseMetricsTextAndFormat(input, labels) require.NoError(t, err) require.Equal(t, writeRequestFixture, expected) From 37e5249e33e4c2d7bc49c4ac587faf12eb6785b7 Mon Sep 17 00:00:00 2001 From: zenador Date: Wed, 24 May 2023 19:00:21 +0800 Subject: [PATCH 156/632] Use DefaultSamplesPerChunk in tsdb (#12387) Signed-off-by: Jeanette Tan --- tsdb/db.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tsdb/db.go b/tsdb/db.go index a0d0a4b26..12974150b 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -640,6 +640,9 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) { if opts.HeadChunksWriteQueueSize < 0 { opts.HeadChunksWriteQueueSize = chunks.DefaultWriteQueueSize } + if opts.SamplesPerChunk <= 0 { + opts.SamplesPerChunk = DefaultSamplesPerChunk + } if opts.MaxBlockChunkSegmentSize <= 0 { opts.MaxBlockChunkSegmentSize = chunks.DefaultChunkSegmentSize } From 89af3517304bcd48a8f20686c0e3317544b6985d Mon Sep 17 00:00:00 2001 From: Justin Lei <97976793+leizor@users.noreply.github.com> Date: Thu, 25 May 2023 02:18:41 -0700 Subject: [PATCH 157/632] Remove samplesPerChunk from memSeries (#12390) Signed-off-by: Justin Lei --- tsdb/head.go | 14 
+++++----- tsdb/head_append.go | 24 ++++++++--------- tsdb/head_test.go | 66 ++++++++++++++++++++++----------------------- tsdb/head_wal.go | 6 ++--- 4 files changed, 54 insertions(+), 56 deletions(-) diff --git a/tsdb/head.go b/tsdb/head.go index f094b3662..a1d61fd6a 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -1614,7 +1614,7 @@ func (h *Head) getOrCreate(hash uint64, lset labels.Labels) (*memSeries, bool, e func (h *Head) getOrCreateWithID(id chunks.HeadSeriesRef, hash uint64, lset labels.Labels) (*memSeries, bool, error) { s, created, err := h.series.getOrSet(hash, lset, func() *memSeries { - return newMemSeries(lset, id, h.opts.IsolationDisabled, h.opts.SamplesPerChunk) + return newMemSeries(lset, id, h.opts.IsolationDisabled) }) if err != nil { return nil, false, err @@ -1922,8 +1922,7 @@ type memSeries struct { mmMaxTime int64 // Max time of any mmapped chunk, only used during WAL replay. - samplesPerChunk int // Target number of samples per chunk. - nextAt int64 // Timestamp at which to cut the next chunk. + nextAt int64 // Timestamp at which to cut the next chunk. // We keep the last value here (in addition to appending it to the chunk) so we can check for duplicates. lastValue float64 @@ -1951,12 +1950,11 @@ type memSeriesOOOFields struct { firstOOOChunkID chunks.HeadChunkID // HeadOOOChunkID for oooMmappedChunks[0]. 
} -func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, isolationDisabled bool, samplesPerChunk int) *memSeries { +func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, isolationDisabled bool) *memSeries { s := &memSeries{ - lset: lset, - ref: id, - nextAt: math.MinInt64, - samplesPerChunk: samplesPerChunk, + lset: lset, + ref: id, + nextAt: math.MinInt64, } if !isolationDisabled { s.txs = newTxRing(4) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 060d32b7f..44847dceb 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -987,7 +987,7 @@ func (a *headAppender) Commit() (err error) { samplesAppended-- } default: - ok, chunkCreated = series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper, chunkRange) + ok, chunkCreated = series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper, chunkRange, a.head.opts.SamplesPerChunk) if ok { if s.T < inOrderMint { inOrderMint = s.T @@ -1016,7 +1016,7 @@ func (a *headAppender) Commit() (err error) { for i, s := range a.histograms { series = a.histogramSeries[i] series.Lock() - ok, chunkCreated := series.appendHistogram(s.T, s.H, a.appendID, a.head.chunkDiskMapper, chunkRange) + ok, chunkCreated := series.appendHistogram(s.T, s.H, a.appendID, a.head.chunkDiskMapper, chunkRange, a.head.opts.SamplesPerChunk) series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) series.pendingCommit = false series.Unlock() @@ -1042,7 +1042,7 @@ func (a *headAppender) Commit() (err error) { for i, s := range a.floatHistograms { series = a.floatHistogramSeries[i] series.Lock() - ok, chunkCreated := series.appendFloatHistogram(s.T, s.FH, a.appendID, a.head.chunkDiskMapper, chunkRange) + ok, chunkCreated := series.appendFloatHistogram(s.T, s.FH, a.appendID, a.head.chunkDiskMapper, chunkRange, a.head.opts.SamplesPerChunk) series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) series.pendingCommit = false series.Unlock() @@ -1122,8 +1122,8 @@ func (s *memSeries) insert(t int64, v float64, chunkDiskMapper 
*chunks.ChunkDisk // the appendID for isolation. (The appendID can be zero, which results in no // isolation for this append.) // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. -func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) { - c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, chunkDiskMapper, chunkRange) +func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, samplesPerChunk int) (sampleInOrder, chunkCreated bool) { + c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, chunkDiskMapper, chunkRange, samplesPerChunk) if !sampleInOrder { return sampleInOrder, chunkCreated } @@ -1144,7 +1144,7 @@ func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper // appendHistogram adds the histogram. // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. -func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) { +func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, samplesPerChunk int) (sampleInOrder, chunkCreated bool) { // Head controls the execution of recoding, so that we own the proper // chunk reference afterwards. 
We check for Appendable from appender before // appendPreprocessor because in case it ends up creating a new chunk, @@ -1157,7 +1157,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui pMergedSpans, nMergedSpans []histogram.Span okToAppend, counterReset, gauge bool ) - c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange) + c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange, samplesPerChunk) if !sampleInOrder { return sampleInOrder, chunkCreated } @@ -1238,7 +1238,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui // appendFloatHistogram adds the float histogram. // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. -func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) { +func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, samplesPerChunk int) (sampleInOrder, chunkCreated bool) { // Head controls the execution of recoding, so that we own the proper // chunk reference afterwards. 
We check for Appendable from appender before // appendPreprocessor because in case it ends up creating a new chunk, @@ -1251,7 +1251,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, pMergedSpans, nMergedSpans []histogram.Span okToAppend, counterReset, gauge bool ) - c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange) + c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange, samplesPerChunk) if !sampleInOrder { return sampleInOrder, chunkCreated } @@ -1334,7 +1334,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. // This should be called only when appending data. func (s *memSeries) appendPreprocessor( - t int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, + t int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, samplesPerChunk int, ) (c *memChunk, sampleInOrder, chunkCreated bool) { c = s.head() @@ -1372,7 +1372,7 @@ func (s *memSeries) appendPreprocessor( // for this chunk that will try to make samples equally distributed within // the remaining chunks in the current chunk range. // At latest it must happen at the timestamp set when the chunk was cut. - if numSamples == s.samplesPerChunk/4 { + if numSamples == samplesPerChunk/4 { s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, s.nextAt) } // If numSamples > samplesPerChunk*2 then our previous prediction was invalid, @@ -1380,7 +1380,7 @@ func (s *memSeries) appendPreprocessor( // Since we assume that the rate is higher, we're being conservative and cutting at 2*samplesPerChunk // as we expect more chunks to come. // Note that next chunk will have its nextAt recalculated for the new rate. 
- if t >= s.nextAt || numSamples >= s.samplesPerChunk*2 { + if t >= s.nextAt || numSamples >= samplesPerChunk*2 { c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange) chunkCreated = true } diff --git a/tsdb/head_test.go b/tsdb/head_test.go index af3df378e..14468e071 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -285,8 +285,8 @@ func BenchmarkLoadWAL(b *testing.B) { require.NoError(b, err) for k := 0; k < c.batches*c.seriesPerBatch; k++ { // Create one mmapped chunk per series, with one sample at the given time. - s := newMemSeries(labels.Labels{}, chunks.HeadSeriesRef(k)*101, defaultIsolationDisabled, DefaultSamplesPerChunk) - s.append(c.mmappedChunkT, 42, 0, chunkDiskMapper, c.mmappedChunkT) + s := newMemSeries(labels.Labels{}, chunks.HeadSeriesRef(k)*101, defaultIsolationDisabled) + s.append(c.mmappedChunkT, 42, 0, chunkDiskMapper, c.mmappedChunkT, DefaultSamplesPerChunk) s.mmapCurrentHeadChunk(chunkDiskMapper) } require.NoError(b, chunkDiskMapper.Close()) @@ -807,10 +807,10 @@ func TestMemSeries_truncateChunks(t *testing.T) { }, } - s := newMemSeries(labels.FromStrings("a", "b"), 1, defaultIsolationDisabled, DefaultSamplesPerChunk) + s := newMemSeries(labels.FromStrings("a", "b"), 1, defaultIsolationDisabled) for i := 0; i < 4000; i += 5 { - ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange) + ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "sample append failed") } @@ -1338,24 +1338,24 @@ func TestMemSeries_append(t *testing.T) { }() const chunkRange = 500 - s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled, DefaultSamplesPerChunk) + s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled) // Add first two samples at the very end of a chunk range and the next two // on and after it. // New chunk must correctly be cut at 1000. 
- ok, chunkCreated := s.append(998, 1, 0, chunkDiskMapper, chunkRange) + ok, chunkCreated := s.append(998, 1, 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "append failed") require.True(t, chunkCreated, "first sample created chunk") - ok, chunkCreated = s.append(999, 2, 0, chunkDiskMapper, chunkRange) + ok, chunkCreated = s.append(999, 2, 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "append failed") require.False(t, chunkCreated, "second sample should use same chunk") - ok, chunkCreated = s.append(1000, 3, 0, chunkDiskMapper, chunkRange) + ok, chunkCreated = s.append(1000, 3, 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "append failed") require.True(t, chunkCreated, "expected new chunk on boundary") - ok, chunkCreated = s.append(1001, 4, 0, chunkDiskMapper, chunkRange) + ok, chunkCreated = s.append(1001, 4, 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "append failed") require.False(t, chunkCreated, "second sample should use same chunk") @@ -1368,7 +1368,7 @@ func TestMemSeries_append(t *testing.T) { // Fill the range [1000,2000) with many samples. Intermediate chunks should be cut // at approximately 120 samples per chunk. 
for i := 1; i < 1000; i++ { - ok, _ := s.append(1001+int64(i), float64(i), 0, chunkDiskMapper, chunkRange) + ok, _ := s.append(1001+int64(i), float64(i), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "append failed") } @@ -1392,7 +1392,7 @@ func TestMemSeries_appendHistogram(t *testing.T) { }() chunkRange := int64(1000) - s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled, DefaultSamplesPerChunk) + s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled) histograms := tsdbutil.GenerateTestHistograms(4) histogramWithOneMoreBucket := histograms[3].Copy() @@ -1404,19 +1404,19 @@ func TestMemSeries_appendHistogram(t *testing.T) { // Add first two samples at the very end of a chunk range and the next two // on and after it. // New chunk must correctly be cut at 1000. - ok, chunkCreated := s.appendHistogram(998, histograms[0], 0, chunkDiskMapper, chunkRange) + ok, chunkCreated := s.appendHistogram(998, histograms[0], 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "append failed") require.True(t, chunkCreated, "first sample created chunk") - ok, chunkCreated = s.appendHistogram(999, histograms[1], 0, chunkDiskMapper, chunkRange) + ok, chunkCreated = s.appendHistogram(999, histograms[1], 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "append failed") require.False(t, chunkCreated, "second sample should use same chunk") - ok, chunkCreated = s.appendHistogram(1000, histograms[2], 0, chunkDiskMapper, chunkRange) + ok, chunkCreated = s.appendHistogram(1000, histograms[2], 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "append failed") require.True(t, chunkCreated, "expected new chunk on boundary") - ok, chunkCreated = s.appendHistogram(1001, histograms[3], 0, chunkDiskMapper, chunkRange) + ok, chunkCreated = s.appendHistogram(1001, histograms[3], 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "append failed") 
require.False(t, chunkCreated, "second sample should use same chunk") @@ -1426,7 +1426,7 @@ func TestMemSeries_appendHistogram(t *testing.T) { require.Equal(t, int64(1000), s.headChunk.minTime, "wrong chunk range") require.Equal(t, int64(1001), s.headChunk.maxTime, "wrong chunk range") - ok, chunkCreated = s.appendHistogram(1002, histogramWithOneMoreBucket, 0, chunkDiskMapper, chunkRange) + ok, chunkCreated = s.appendHistogram(1002, histogramWithOneMoreBucket, 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "append failed") require.False(t, chunkCreated, "third sample should trigger a re-encoded chunk") @@ -1448,7 +1448,7 @@ func TestMemSeries_append_atVariableRate(t *testing.T) { }) chunkRange := DefaultBlockDuration - s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled, DefaultSamplesPerChunk) + s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled) // At this slow rate, we will fill the chunk in two block durations. slowRate := (DefaultBlockDuration * 2) / samplesPerChunk @@ -1456,7 +1456,7 @@ func TestMemSeries_append_atVariableRate(t *testing.T) { var nextTs int64 var totalAppendedSamples int for i := 0; i < samplesPerChunk/4; i++ { - ok, _ := s.append(nextTs, float64(i), 0, chunkDiskMapper, chunkRange) + ok, _ := s.append(nextTs, float64(i), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.Truef(t, ok, "slow sample %d was not appended", i) nextTs += slowRate totalAppendedSamples++ @@ -1465,12 +1465,12 @@ func TestMemSeries_append_atVariableRate(t *testing.T) { // Suddenly, the rate increases and we receive a sample every millisecond. 
for i := 0; i < math.MaxUint16; i++ { - ok, _ := s.append(nextTs, float64(i), 0, chunkDiskMapper, chunkRange) + ok, _ := s.append(nextTs, float64(i), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.Truef(t, ok, "quick sample %d was not appended", i) nextTs++ totalAppendedSamples++ } - ok, chunkCreated := s.append(DefaultBlockDuration, float64(0), 0, chunkDiskMapper, chunkRange) + ok, chunkCreated := s.append(DefaultBlockDuration, float64(0), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "new chunk sample was not appended") require.True(t, chunkCreated, "sample at block duration timestamp should create a new chunk") @@ -1495,18 +1495,18 @@ func TestGCChunkAccess(t *testing.T) { s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1")) // Appending 2 samples for the first chunk. - ok, chunkCreated := s.append(0, 0, 0, h.chunkDiskMapper, chunkRange) + ok, chunkCreated := s.append(0, 0, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "series append failed") require.True(t, chunkCreated, "chunks was not created") - ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper, chunkRange) + ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "series append failed") require.False(t, chunkCreated, "chunks was created") // A new chunks should be created here as it's beyond the chunk range. 
- ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper, chunkRange) + ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "series append failed") require.True(t, chunkCreated, "chunks was not created") - ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper, chunkRange) + ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "series append failed") require.False(t, chunkCreated, "chunks was created") @@ -1548,18 +1548,18 @@ func TestGCSeriesAccess(t *testing.T) { s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1")) // Appending 2 samples for the first chunk. - ok, chunkCreated := s.append(0, 0, 0, h.chunkDiskMapper, chunkRange) + ok, chunkCreated := s.append(0, 0, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "series append failed") require.True(t, chunkCreated, "chunks was not created") - ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper, chunkRange) + ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "series append failed") require.False(t, chunkCreated, "chunks was created") // A new chunks should be created here as it's beyond the chunk range. 
- ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper, chunkRange) + ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "series append failed") require.True(t, chunkCreated, "chunks was not created") - ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper, chunkRange) + ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "series append failed") require.False(t, chunkCreated, "chunks was created") @@ -1795,10 +1795,10 @@ func TestHeadReadWriterRepair(t *testing.T) { require.True(t, created, "series was not created") for i := 0; i < 7; i++ { - ok, chunkCreated := s.append(int64(i*chunkRange), float64(i*chunkRange), 0, h.chunkDiskMapper, chunkRange) + ok, chunkCreated := s.append(int64(i*chunkRange), float64(i*chunkRange), 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "series append failed") require.True(t, chunkCreated, "chunk was not created") - ok, chunkCreated = s.append(int64(i*chunkRange)+chunkRange-1, float64(i*chunkRange), 0, h.chunkDiskMapper, chunkRange) + ok, chunkCreated = s.append(int64(i*chunkRange)+chunkRange-1, float64(i*chunkRange), 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "series append failed") require.False(t, chunkCreated, "chunk was created") h.chunkDiskMapper.CutNewFile() @@ -2146,7 +2146,7 @@ func TestIsolationAppendIDZeroIsNoop(t *testing.T) { s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1")) - ok, _ := s.append(0, 0, 0, h.chunkDiskMapper, h.chunkRange.Load()) + ok, _ := s.append(0, 0, 0, h.chunkDiskMapper, h.chunkRange.Load(), DefaultSamplesPerChunk) require.True(t, ok, "Series append failed.") require.Equal(t, 0, s.txs.txIDCount, "Series should not have an appendID after append with appendID=0.") } @@ -2610,10 +2610,10 @@ func TestIteratorSeekIntoBuffer(t *testing.T) { }() const chunkRange = 500 - s := 
newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled, DefaultSamplesPerChunk) + s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled) for i := 0; i < 7; i++ { - ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange) + ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) require.True(t, ok, "sample append failed") } diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 6e81f1793..9741d1da0 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -588,7 +588,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp if s.T <= ms.mmMaxTime { continue } - if _, chunkCreated := ms.append(s.T, s.V, 0, h.chunkDiskMapper, chunkRange); chunkCreated { + if _, chunkCreated := ms.append(s.T, s.V, 0, h.chunkDiskMapper, chunkRange, h.opts.SamplesPerChunk); chunkCreated { h.metrics.chunksCreated.Inc() h.metrics.chunks.Inc() } @@ -618,9 +618,9 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp } var chunkCreated bool if s.h != nil { - _, chunkCreated = ms.appendHistogram(s.t, s.h, 0, h.chunkDiskMapper, chunkRange) + _, chunkCreated = ms.appendHistogram(s.t, s.h, 0, h.chunkDiskMapper, chunkRange, h.opts.SamplesPerChunk) } else { - _, chunkCreated = ms.appendFloatHistogram(s.t, s.fh, 0, h.chunkDiskMapper, chunkRange) + _, chunkCreated = ms.appendFloatHistogram(s.t, s.fh, 0, h.chunkDiskMapper, chunkRange, h.opts.SamplesPerChunk) } if chunkCreated { h.metrics.chunksCreated.Inc() From cb045c0e4b94bbf3eee174d91b5ef2b8553948d5 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Thu, 25 May 2023 11:49:43 +0200 Subject: [PATCH 158/632] Fix wording from "jitterSeed" -> "offsetSeed" for server-wide scrape offsets In digital communication, "jitter" usually refers to how much a signal deviates from true periodicity, see https://en.wikipedia.org/wiki/Jitter. 
The way we are using the "jitterSeed" in Prometheus does not affect the true periodicity at all, but just introduces a constant phase shift (or offset) within the period. So it would be more correct and less confusing to call the "jitterSeed" an "offsetSeed" instead. Signed-off-by: Julius Volz --- scrape/manager.go | 12 ++++++------ scrape/manager_test.go | 18 +++++++++--------- scrape/scrape.go | 14 +++++++------- scrape/target_test.go | 4 ++-- 4 files changed, 24 insertions(+), 24 deletions(-) diff --git a/scrape/manager.go b/scrape/manager.go index d75fe30cf..d7cf6792c 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -150,7 +150,7 @@ type Manager struct { append storage.Appendable graceShut chan struct{} - jitterSeed uint64 // Global jitterSeed seed is used to spread scrape workload across HA setup. + offsetSeed uint64 // Global offsetSeed seed is used to spread scrape workload across HA setup. mtxScrape sync.Mutex // Guards the fields below. scrapeConfigs map[string]*config.ScrapeConfig scrapePools map[string]*scrapePool @@ -214,7 +214,7 @@ func (m *Manager) reload() { level.Error(m.logger).Log("msg", "error reloading target set", "err", "invalid config id:"+setName) continue } - sp, err := newScrapePool(scrapeConfig, m.append, m.jitterSeed, log.With(m.logger, "scrape_pool", setName), m.opts) + sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, log.With(m.logger, "scrape_pool", setName), m.opts) if err != nil { level.Error(m.logger).Log("msg", "error creating new scrape pool", "err", err, "scrape_pool", setName) continue @@ -234,8 +234,8 @@ func (m *Manager) reload() { wg.Wait() } -// setJitterSeed calculates a global jitterSeed per server relying on extra label set. -func (m *Manager) setJitterSeed(labels labels.Labels) error { +// setOffsetSeed calculates a global offsetSeed per server relying on extra label set. 
+func (m *Manager) setOffsetSeed(labels labels.Labels) error { h := fnv.New64a() hostname, err := osutil.GetFQDN() if err != nil { @@ -244,7 +244,7 @@ func (m *Manager) setJitterSeed(labels labels.Labels) error { if _, err := fmt.Fprintf(h, "%s%s", hostname, labels.String()); err != nil { return err } - m.jitterSeed = h.Sum64() + m.offsetSeed = h.Sum64() return nil } @@ -281,7 +281,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { } m.scrapeConfigs = c - if err := m.setJitterSeed(cfg.GlobalConfig.ExternalLabels); err != nil { + if err := m.setOffsetSeed(cfg.GlobalConfig.ExternalLabels); err != nil { return err } diff --git a/scrape/manager_test.go b/scrape/manager_test.go index d05d25fa2..50f632013 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -596,7 +596,7 @@ func TestManagerTargetsUpdates(t *testing.T) { } } -func TestSetJitter(t *testing.T) { +func TestSetOffsetSeed(t *testing.T) { getConfig := func(prometheus string) *config.Config { cfgText := ` global: @@ -617,24 +617,24 @@ global: // Load the first config. cfg1 := getConfig("ha1") - if err := scrapeManager.setJitterSeed(cfg1.GlobalConfig.ExternalLabels); err != nil { + if err := scrapeManager.setOffsetSeed(cfg1.GlobalConfig.ExternalLabels); err != nil { t.Error(err) } - jitter1 := scrapeManager.jitterSeed + offsetSeed1 := scrapeManager.offsetSeed - if jitter1 == 0 { - t.Error("Jitter has to be a hash of uint64") + if offsetSeed1 == 0 { + t.Error("Offset seed has to be a hash of uint64") } // Load the first config. 
cfg2 := getConfig("ha2") - if err := scrapeManager.setJitterSeed(cfg2.GlobalConfig.ExternalLabels); err != nil { + if err := scrapeManager.setOffsetSeed(cfg2.GlobalConfig.ExternalLabels); err != nil { t.Error(err) } - jitter2 := scrapeManager.jitterSeed + offsetSeed2 := scrapeManager.offsetSeed - if jitter1 == jitter2 { - t.Error("Jitter should not be the same on different set of external labels") + if offsetSeed1 == offsetSeed2 { + t.Error("Offset seed should not be the same on different set of external labels") } } diff --git a/scrape/scrape.go b/scrape/scrape.go index a97cbf539..8c4cc51e7 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -279,7 +279,7 @@ const maxAheadTime = 10 * time.Minute // returning an empty label set is interpreted as "drop" type labelsMutator func(labels.Labels) labels.Labels -func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, options *Options) (*scrapePool, error) { +func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger log.Logger, options *Options) (*scrapePool, error) { targetScrapePools.Inc() if logger == nil { logger = log.NewNopLogger() @@ -325,7 +325,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed func(l labels.Labels) labels.Labels { return mutateReportSampleLabels(l, opts.target) }, func(ctx context.Context) storage.Appender { return app.Appender(ctx) }, cache, - jitterSeed, + offsetSeed, opts.honorTimestamps, opts.sampleLimit, opts.bucketLimit, @@ -775,7 +775,7 @@ func appender(app storage.Appender, sampleLimit, bucketLimit int) storage.Append type scraper interface { scrape(ctx context.Context, w io.Writer) (string, error) Report(start time.Time, dur time.Duration, err error) - offset(interval time.Duration, jitterSeed uint64) time.Duration + offset(interval time.Duration, offsetSeed uint64) time.Duration } // targetScraper implements the scraper interface for a target. 
@@ -891,7 +891,7 @@ type scrapeLoop struct { cache *scrapeCache lastScrapeSize int buffers *pool.Pool - jitterSeed uint64 + offsetSeed uint64 honorTimestamps bool forcedErr error forcedErrMtx sync.Mutex @@ -1175,7 +1175,7 @@ func newScrapeLoop(ctx context.Context, reportSampleMutator labelsMutator, appender func(ctx context.Context) storage.Appender, cache *scrapeCache, - jitterSeed uint64, + offsetSeed uint64, honorTimestamps bool, sampleLimit int, bucketLimit int, @@ -1217,7 +1217,7 @@ func newScrapeLoop(ctx context.Context, sampleMutator: sampleMutator, reportSampleMutator: reportSampleMutator, stopped: make(chan struct{}), - jitterSeed: jitterSeed, + offsetSeed: offsetSeed, l: l, parentCtx: ctx, appenderCtx: appenderCtx, @@ -1238,7 +1238,7 @@ func newScrapeLoop(ctx context.Context, func (sl *scrapeLoop) run(errc chan<- error) { select { - case <-time.After(sl.scraper.offset(sl.interval, sl.jitterSeed)): + case <-time.After(sl.scraper.offset(sl.interval, sl.offsetSeed)): // Continue after a scraping offset. case <-sl.ctx.Done(): close(sl.stopped) diff --git a/scrape/target_test.go b/scrape/target_test.go index 12d3b5a4d..4f0c840cd 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -59,7 +59,7 @@ func TestTargetLabels(t *testing.T) { func TestTargetOffset(t *testing.T) { interval := 10 * time.Second - jitter := uint64(0) + offsetSeed := uint64(0) offsets := make([]time.Duration, 10000) @@ -68,7 +68,7 @@ func TestTargetOffset(t *testing.T) { target := newTestTarget("example.com:80", 0, labels.FromStrings( "label", fmt.Sprintf("%d", i), )) - offsets[i] = target.offset(interval, jitter) + offsets[i] = target.offset(interval, offsetSeed) } // Put the offsets into buckets and validate that they are all From ce236c61ab2e0cb6a9d5ef6a2e2ad720746861a0 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Thu, 25 May 2023 13:47:34 +0200 Subject: [PATCH 159/632] Mark 2.45 as LTS As the 2.37 LTS is going EOL in July 2023, let's mark 2.45 as LTS. 
I have synced with Jesus about this. He will bootstrap the release and after a few weeks I will do the maintenance for the lifetime of the LTS. Signed-off-by: Julien Pivotto --- RELEASE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index d7f24dabd..f5c907fe9 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -49,7 +49,7 @@ Release cadence of first pre-releases being cut is 6 weeks. | v2.42 | 2023-01-25 | Kemal Akkoyun (GitHub: @kakkoyun) | | v2.43 | 2023-03-08 | Julien Pivotto (GitHub: @roidelapluie) | | v2.44 | 2023-04-19 | Bryan Boreham (GitHub: @bboreham) | -| v2.45 | 2023-05-31 | Jesus Vazquez (Github: @jesusvazquez) | +| v2.45 LTS | 2023-05-31 | Jesus Vazquez (Github: @jesusvazquez) | | v2.46 | 2023-07-12 | **searching for volunteer** | If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice. 
From 4c4454e4c9e12ad08e765335ba11199f296ad986 Mon Sep 17 00:00:00 2001 From: Justin Lei Date: Thu, 25 May 2023 13:12:32 -0700 Subject: [PATCH 160/632] Group args to append to memSeries in chunkOpts Signed-off-by: Justin Lei --- tsdb/head_append.go | 37 ++++++++++----- tsdb/head_test.go | 113 +++++++++++++++++++++++++++++++------------- tsdb/head_wal.go | 12 +++-- 3 files changed, 113 insertions(+), 49 deletions(-) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 44847dceb..a77c8a4eb 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -881,9 +881,13 @@ func (a *headAppender) Commit() (err error) { oooMmapMarkers map[chunks.HeadSeriesRef]chunks.ChunkDiskMapperRef oooRecords [][]byte oooCapMax = a.head.opts.OutOfOrderCapMax.Load() - chunkRange = a.head.chunkRange.Load() series *memSeries - enc record.Encoder + appendChunkOpts = chunkOpts{ + chunkDiskMapper: a.head.chunkDiskMapper, + chunkRange: a.head.chunkRange.Load(), + samplesPerChunk: a.head.opts.SamplesPerChunk, + } + enc record.Encoder ) defer func() { for i := range oooRecords { @@ -987,7 +991,7 @@ func (a *headAppender) Commit() (err error) { samplesAppended-- } default: - ok, chunkCreated = series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper, chunkRange, a.head.opts.SamplesPerChunk) + ok, chunkCreated = series.append(s.T, s.V, a.appendID, appendChunkOpts) if ok { if s.T < inOrderMint { inOrderMint = s.T @@ -1016,7 +1020,7 @@ func (a *headAppender) Commit() (err error) { for i, s := range a.histograms { series = a.histogramSeries[i] series.Lock() - ok, chunkCreated := series.appendHistogram(s.T, s.H, a.appendID, a.head.chunkDiskMapper, chunkRange, a.head.opts.SamplesPerChunk) + ok, chunkCreated := series.appendHistogram(s.T, s.H, a.appendID, appendChunkOpts) series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) series.pendingCommit = false series.Unlock() @@ -1042,7 +1046,7 @@ func (a *headAppender) Commit() (err error) { for i, s := range a.floatHistograms { series = 
a.floatHistogramSeries[i] series.Lock() - ok, chunkCreated := series.appendFloatHistogram(s.T, s.FH, a.appendID, a.head.chunkDiskMapper, chunkRange, a.head.opts.SamplesPerChunk) + ok, chunkCreated := series.appendFloatHistogram(s.T, s.FH, a.appendID, appendChunkOpts) series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) series.pendingCommit = false series.Unlock() @@ -1118,12 +1122,19 @@ func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDisk return ok, chunkCreated, mmapRef } +// chunkOpts are chunk-level options that are passed when appending to a memSeries. +type chunkOpts struct { + chunkDiskMapper *chunks.ChunkDiskMapper + chunkRange int64 + samplesPerChunk int +} + // append adds the sample (t, v) to the series. The caller also has to provide // the appendID for isolation. (The appendID can be zero, which results in no // isolation for this append.) // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. -func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, samplesPerChunk int) (sampleInOrder, chunkCreated bool) { - c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, chunkDiskMapper, chunkRange, samplesPerChunk) +func (s *memSeries) append(t int64, v float64, appendID uint64, o chunkOpts) (sampleInOrder, chunkCreated bool) { + c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, o.chunkDiskMapper, o.chunkRange, o.samplesPerChunk) if !sampleInOrder { return sampleInOrder, chunkCreated } @@ -1144,7 +1155,7 @@ func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper // appendHistogram adds the histogram. // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. 
-func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, samplesPerChunk int) (sampleInOrder, chunkCreated bool) { +func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID uint64, o chunkOpts) (sampleInOrder, chunkCreated bool) { // Head controls the execution of recoding, so that we own the proper // chunk reference afterwards. We check for Appendable from appender before // appendPreprocessor because in case it ends up creating a new chunk, @@ -1157,7 +1168,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui pMergedSpans, nMergedSpans []histogram.Span okToAppend, counterReset, gauge bool ) - c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange, samplesPerChunk) + c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, o.chunkDiskMapper, o.chunkRange, o.samplesPerChunk) if !sampleInOrder { return sampleInOrder, chunkCreated } @@ -1193,7 +1204,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui // - okToAppend and no inserts → Chunk is ready to support our histogram. switch { case !okToAppend || counterReset: - c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange) + c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, o.chunkDiskMapper, o.chunkRange) chunkCreated = true case len(pForwardInserts) > 0 || len(nForwardInserts) > 0: // New buckets have appeared. We need to recode all @@ -1238,7 +1249,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui // appendFloatHistogram adds the float histogram. // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. 
-func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, samplesPerChunk int) (sampleInOrder, chunkCreated bool) { +func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, appendID uint64, o chunkOpts) (sampleInOrder, chunkCreated bool) { // Head controls the execution of recoding, so that we own the proper // chunk reference afterwards. We check for Appendable from appender before // appendPreprocessor because in case it ends up creating a new chunk, @@ -1251,7 +1262,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, pMergedSpans, nMergedSpans []histogram.Span okToAppend, counterReset, gauge bool ) - c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange, samplesPerChunk) + c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, o.chunkDiskMapper, o.chunkRange, o.samplesPerChunk) if !sampleInOrder { return sampleInOrder, chunkCreated } @@ -1287,7 +1298,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, // - okToAppend and no inserts → Chunk is ready to support our histogram. switch { case !okToAppend || counterReset: - c = s.cutNewHeadChunk(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange) + c = s.cutNewHeadChunk(t, chunkenc.EncFloatHistogram, o.chunkDiskMapper, o.chunkRange) chunkCreated = true case len(pForwardInserts) > 0 || len(nForwardInserts) > 0: // New buckets have appeared. 
We need to recode all diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 14468e071..8eb218b5a 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -283,10 +283,15 @@ func BenchmarkLoadWAL(b *testing.B) { if c.mmappedChunkT != 0 { chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, mmappedChunksDir(dir), chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize) require.NoError(b, err) + cOpts := chunkOpts{ + chunkDiskMapper: chunkDiskMapper, + chunkRange: c.mmappedChunkT, + samplesPerChunk: DefaultSamplesPerChunk, + } for k := 0; k < c.batches*c.seriesPerBatch; k++ { // Create one mmapped chunk per series, with one sample at the given time. s := newMemSeries(labels.Labels{}, chunks.HeadSeriesRef(k)*101, defaultIsolationDisabled) - s.append(c.mmappedChunkT, 42, 0, chunkDiskMapper, c.mmappedChunkT, DefaultSamplesPerChunk) + s.append(c.mmappedChunkT, 42, 0, cOpts) s.mmapCurrentHeadChunk(chunkDiskMapper) } require.NoError(b, chunkDiskMapper.Close()) @@ -799,7 +804,11 @@ func TestMemSeries_truncateChunks(t *testing.T) { defer func() { require.NoError(t, chunkDiskMapper.Close()) }() - const chunkRange = 2000 + cOpts := chunkOpts{ + chunkDiskMapper: chunkDiskMapper, + chunkRange: 2000, + samplesPerChunk: DefaultSamplesPerChunk, + } memChunkPool := sync.Pool{ New: func() interface{} { @@ -810,7 +819,7 @@ func TestMemSeries_truncateChunks(t *testing.T) { s := newMemSeries(labels.FromStrings("a", "b"), 1, defaultIsolationDisabled) for i := 0; i < 4000; i += 5 { - ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, _ := s.append(int64(i), float64(i), 0, cOpts) require.True(t, ok, "sample append failed") } @@ -1336,26 +1345,30 @@ func TestMemSeries_append(t *testing.T) { defer func() { require.NoError(t, chunkDiskMapper.Close()) }() - const chunkRange = 500 + cOpts := chunkOpts{ + chunkDiskMapper: chunkDiskMapper, + chunkRange: 500, + samplesPerChunk: DefaultSamplesPerChunk, + } s := 
newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled) // Add first two samples at the very end of a chunk range and the next two // on and after it. // New chunk must correctly be cut at 1000. - ok, chunkCreated := s.append(998, 1, 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated := s.append(998, 1, 0, cOpts) require.True(t, ok, "append failed") require.True(t, chunkCreated, "first sample created chunk") - ok, chunkCreated = s.append(999, 2, 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.append(999, 2, 0, cOpts) require.True(t, ok, "append failed") require.False(t, chunkCreated, "second sample should use same chunk") - ok, chunkCreated = s.append(1000, 3, 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.append(1000, 3, 0, cOpts) require.True(t, ok, "append failed") require.True(t, chunkCreated, "expected new chunk on boundary") - ok, chunkCreated = s.append(1001, 4, 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.append(1001, 4, 0, cOpts) require.True(t, ok, "append failed") require.False(t, chunkCreated, "second sample should use same chunk") @@ -1368,7 +1381,7 @@ func TestMemSeries_append(t *testing.T) { // Fill the range [1000,2000) with many samples. Intermediate chunks should be cut // at approximately 120 samples per chunk. 
for i := 1; i < 1000; i++ { - ok, _ := s.append(1001+int64(i), float64(i), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, _ := s.append(1001+int64(i), float64(i), 0, cOpts) require.True(t, ok, "append failed") } @@ -1390,7 +1403,11 @@ func TestMemSeries_appendHistogram(t *testing.T) { defer func() { require.NoError(t, chunkDiskMapper.Close()) }() - chunkRange := int64(1000) + cOpts := chunkOpts{ + chunkDiskMapper: chunkDiskMapper, + chunkRange: int64(1000), + samplesPerChunk: DefaultSamplesPerChunk, + } s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled) @@ -1404,19 +1421,19 @@ func TestMemSeries_appendHistogram(t *testing.T) { // Add first two samples at the very end of a chunk range and the next two // on and after it. // New chunk must correctly be cut at 1000. - ok, chunkCreated := s.appendHistogram(998, histograms[0], 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated := s.appendHistogram(998, histograms[0], 0, cOpts) require.True(t, ok, "append failed") require.True(t, chunkCreated, "first sample created chunk") - ok, chunkCreated = s.appendHistogram(999, histograms[1], 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.appendHistogram(999, histograms[1], 0, cOpts) require.True(t, ok, "append failed") require.False(t, chunkCreated, "second sample should use same chunk") - ok, chunkCreated = s.appendHistogram(1000, histograms[2], 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.appendHistogram(1000, histograms[2], 0, cOpts) require.True(t, ok, "append failed") require.True(t, chunkCreated, "expected new chunk on boundary") - ok, chunkCreated = s.appendHistogram(1001, histograms[3], 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.appendHistogram(1001, histograms[3], 0, cOpts) require.True(t, ok, "append failed") require.False(t, chunkCreated, "second sample should use same chunk") @@ -1426,7 +1443,7 @@ func 
TestMemSeries_appendHistogram(t *testing.T) { require.Equal(t, int64(1000), s.headChunk.minTime, "wrong chunk range") require.Equal(t, int64(1001), s.headChunk.maxTime, "wrong chunk range") - ok, chunkCreated = s.appendHistogram(1002, histogramWithOneMoreBucket, 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.appendHistogram(1002, histogramWithOneMoreBucket, 0, cOpts) require.True(t, ok, "append failed") require.False(t, chunkCreated, "third sample should trigger a re-encoded chunk") @@ -1446,7 +1463,11 @@ func TestMemSeries_append_atVariableRate(t *testing.T) { t.Cleanup(func() { require.NoError(t, chunkDiskMapper.Close()) }) - chunkRange := DefaultBlockDuration + cOpts := chunkOpts{ + chunkDiskMapper: chunkDiskMapper, + chunkRange: DefaultBlockDuration, + samplesPerChunk: samplesPerChunk, + } s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled) @@ -1456,7 +1477,7 @@ func TestMemSeries_append_atVariableRate(t *testing.T) { var nextTs int64 var totalAppendedSamples int for i := 0; i < samplesPerChunk/4; i++ { - ok, _ := s.append(nextTs, float64(i), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, _ := s.append(nextTs, float64(i), 0, cOpts) require.Truef(t, ok, "slow sample %d was not appended", i) nextTs += slowRate totalAppendedSamples++ @@ -1465,12 +1486,12 @@ func TestMemSeries_append_atVariableRate(t *testing.T) { // Suddenly, the rate increases and we receive a sample every millisecond. 
for i := 0; i < math.MaxUint16; i++ { - ok, _ := s.append(nextTs, float64(i), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, _ := s.append(nextTs, float64(i), 0, cOpts) require.Truef(t, ok, "quick sample %d was not appended", i) nextTs++ totalAppendedSamples++ } - ok, chunkCreated := s.append(DefaultBlockDuration, float64(0), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated := s.append(DefaultBlockDuration, float64(0), 0, cOpts) require.True(t, ok, "new chunk sample was not appended") require.True(t, chunkCreated, "sample at block duration timestamp should create a new chunk") @@ -1490,23 +1511,29 @@ func TestGCChunkAccess(t *testing.T) { require.NoError(t, h.Close()) }() + cOpts := chunkOpts{ + chunkDiskMapper: h.chunkDiskMapper, + chunkRange: chunkRange, + samplesPerChunk: DefaultSamplesPerChunk, + } + h.initTime(0) s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1")) // Appending 2 samples for the first chunk. - ok, chunkCreated := s.append(0, 0, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated := s.append(0, 0, 0, cOpts) require.True(t, ok, "series append failed") require.True(t, chunkCreated, "chunks was not created") - ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.append(999, 999, 0, cOpts) require.True(t, ok, "series append failed") require.False(t, chunkCreated, "chunks was created") // A new chunks should be created here as it's beyond the chunk range. 
- ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.append(1000, 1000, 0, cOpts) require.True(t, ok, "series append failed") require.True(t, chunkCreated, "chunks was not created") - ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.append(1999, 1999, 0, cOpts) require.True(t, ok, "series append failed") require.False(t, chunkCreated, "chunks was created") @@ -1543,23 +1570,29 @@ func TestGCSeriesAccess(t *testing.T) { require.NoError(t, h.Close()) }() + cOpts := chunkOpts{ + chunkDiskMapper: h.chunkDiskMapper, + chunkRange: chunkRange, + samplesPerChunk: DefaultSamplesPerChunk, + } + h.initTime(0) s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1")) // Appending 2 samples for the first chunk. - ok, chunkCreated := s.append(0, 0, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated := s.append(0, 0, 0, cOpts) require.True(t, ok, "series append failed") require.True(t, chunkCreated, "chunks was not created") - ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.append(999, 999, 0, cOpts) require.True(t, ok, "series append failed") require.False(t, chunkCreated, "chunks was created") // A new chunks should be created here as it's beyond the chunk range. 
- ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.append(1000, 1000, 0, cOpts) require.True(t, ok, "series append failed") require.True(t, chunkCreated, "chunks was not created") - ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.append(1999, 1999, 0, cOpts) require.True(t, ok, "series append failed") require.False(t, chunkCreated, "chunks was created") @@ -1791,14 +1824,20 @@ func TestHeadReadWriterRepair(t *testing.T) { require.Equal(t, 0.0, prom_testutil.ToFloat64(h.metrics.mmapChunkCorruptionTotal)) require.NoError(t, h.Init(math.MinInt64)) + cOpts := chunkOpts{ + chunkDiskMapper: h.chunkDiskMapper, + chunkRange: chunkRange, + samplesPerChunk: DefaultSamplesPerChunk, + } + s, created, _ := h.getOrCreate(1, labels.FromStrings("a", "1")) require.True(t, created, "series was not created") for i := 0; i < 7; i++ { - ok, chunkCreated := s.append(int64(i*chunkRange), float64(i*chunkRange), 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated := s.append(int64(i*chunkRange), float64(i*chunkRange), 0, cOpts) require.True(t, ok, "series append failed") require.True(t, chunkCreated, "chunk was not created") - ok, chunkCreated = s.append(int64(i*chunkRange)+chunkRange-1, float64(i*chunkRange), 0, h.chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, chunkCreated = s.append(int64(i*chunkRange)+chunkRange-1, float64(i*chunkRange), 0, cOpts) require.True(t, ok, "series append failed") require.False(t, chunkCreated, "chunk was created") h.chunkDiskMapper.CutNewFile() @@ -2144,9 +2183,15 @@ func TestIsolationAppendIDZeroIsNoop(t *testing.T) { h.initTime(0) + cOpts := chunkOpts{ + chunkDiskMapper: h.chunkDiskMapper, + chunkRange: h.chunkRange.Load(), + samplesPerChunk: DefaultSamplesPerChunk, + } + s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1")) - ok, _ := s.append(0, 0, 0, h.chunkDiskMapper, 
h.chunkRange.Load(), DefaultSamplesPerChunk) + ok, _ := s.append(0, 0, 0, cOpts) require.True(t, ok, "Series append failed.") require.Equal(t, 0, s.txs.txIDCount, "Series should not have an appendID after append with appendID=0.") } @@ -2608,12 +2653,16 @@ func TestIteratorSeekIntoBuffer(t *testing.T) { defer func() { require.NoError(t, chunkDiskMapper.Close()) }() - const chunkRange = 500 + cOpts := chunkOpts{ + chunkDiskMapper: chunkDiskMapper, + chunkRange: 500, + samplesPerChunk: DefaultSamplesPerChunk, + } s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled) for i := 0; i < 7; i++ { - ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange, DefaultSamplesPerChunk) + ok, _ := s.append(int64(i), float64(i), 0, cOpts) require.True(t, ok, "sample append failed") } diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 9741d1da0..71120c55e 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -564,7 +564,11 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp minValidTime := h.minValidTime.Load() mint, maxt := int64(math.MaxInt64), int64(math.MinInt64) - chunkRange := h.chunkRange.Load() + appendChunkOpts := chunkOpts{ + chunkDiskMapper: h.chunkDiskMapper, + chunkRange: h.chunkRange.Load(), + samplesPerChunk: h.opts.SamplesPerChunk, + } for in := range wp.input { if in.existingSeries != nil { @@ -588,7 +592,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp if s.T <= ms.mmMaxTime { continue } - if _, chunkCreated := ms.append(s.T, s.V, 0, h.chunkDiskMapper, chunkRange, h.opts.SamplesPerChunk); chunkCreated { + if _, chunkCreated := ms.append(s.T, s.V, 0, appendChunkOpts); chunkCreated { h.metrics.chunksCreated.Inc() h.metrics.chunks.Inc() } @@ -618,9 +622,9 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp } var chunkCreated bool if s.h != nil { - _, chunkCreated = ms.appendHistogram(s.t, s.h, 0, h.chunkDiskMapper, chunkRange, 
h.opts.SamplesPerChunk) + _, chunkCreated = ms.appendHistogram(s.t, s.h, 0, appendChunkOpts) } else { - _, chunkCreated = ms.appendFloatHistogram(s.t, s.fh, 0, h.chunkDiskMapper, chunkRange, h.opts.SamplesPerChunk) + _, chunkCreated = ms.appendFloatHistogram(s.t, s.fh, 0, appendChunkOpts) } if chunkCreated { h.metrics.chunksCreated.Inc() From e73d8b208408cf5566541a68c610776935f8789d Mon Sep 17 00:00:00 2001 From: Justin Lei Date: Thu, 25 May 2023 13:35:09 -0700 Subject: [PATCH 161/632] Also pass chunkOpts into appendPreprocessor Signed-off-by: Justin Lei --- tsdb/head_append.go | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index a77c8a4eb..3d828d066 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -1134,7 +1134,7 @@ type chunkOpts struct { // isolation for this append.) // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. func (s *memSeries) append(t int64, v float64, appendID uint64, o chunkOpts) (sampleInOrder, chunkCreated bool) { - c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, o.chunkDiskMapper, o.chunkRange, o.samplesPerChunk) + c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, o) if !sampleInOrder { return sampleInOrder, chunkCreated } @@ -1168,7 +1168,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui pMergedSpans, nMergedSpans []histogram.Span okToAppend, counterReset, gauge bool ) - c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, o.chunkDiskMapper, o.chunkRange, o.samplesPerChunk) + c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, o) if !sampleInOrder { return sampleInOrder, chunkCreated } @@ -1262,7 +1262,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, pMergedSpans, nMergedSpans []histogram.Span okToAppend, counterReset, gauge 
bool ) - c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, o.chunkDiskMapper, o.chunkRange, o.samplesPerChunk) + c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, o) if !sampleInOrder { return sampleInOrder, chunkCreated } @@ -1344,9 +1344,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, // appendPreprocessor takes care of cutting new chunks and m-mapping old chunks. // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. // This should be called only when appending data. -func (s *memSeries) appendPreprocessor( - t int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, samplesPerChunk int, -) (c *memChunk, sampleInOrder, chunkCreated bool) { +func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts) (c *memChunk, sampleInOrder, chunkCreated bool) { c = s.head() if c == nil { @@ -1355,7 +1353,7 @@ func (s *memSeries) appendPreprocessor( return c, false, false } // There is no head chunk in this series yet, create the first chunk for the sample. - c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange) + c = s.cutNewHeadChunk(t, e, o.chunkDiskMapper, o.chunkRange) chunkCreated = true } @@ -1367,7 +1365,7 @@ func (s *memSeries) appendPreprocessor( if c.chunk.Encoding() != e { // The chunk encoding expected by this append is different than the head chunk's // encoding. So we cut a new chunk with the expected encoding. - c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange) + c = s.cutNewHeadChunk(t, e, o.chunkDiskMapper, o.chunkRange) chunkCreated = true } @@ -1376,14 +1374,14 @@ func (s *memSeries) appendPreprocessor( // It could be the new chunk created after reading the chunk snapshot, // hence we fix the minTime of the chunk here. 
c.minTime = t - s.nextAt = rangeForTimestamp(c.minTime, chunkRange) + s.nextAt = rangeForTimestamp(c.minTime, o.chunkRange) } // If we reach 25% of a chunk's desired sample count, predict an end time // for this chunk that will try to make samples equally distributed within // the remaining chunks in the current chunk range. // At latest it must happen at the timestamp set when the chunk was cut. - if numSamples == samplesPerChunk/4 { + if numSamples == o.samplesPerChunk/4 { s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, s.nextAt) } // If numSamples > samplesPerChunk*2 then our previous prediction was invalid, @@ -1391,8 +1389,8 @@ func (s *memSeries) appendPreprocessor( // Since we assume that the rate is higher, we're being conservative and cutting at 2*samplesPerChunk // as we expect more chunks to come. // Note that next chunk will have its nextAt recalculated for the new rate. - if t >= s.nextAt || numSamples >= samplesPerChunk*2 { - c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange) + if t >= s.nextAt || numSamples >= o.samplesPerChunk*2 { + c = s.cutNewHeadChunk(t, e, o.chunkDiskMapper, o.chunkRange) chunkCreated = true } From a308ea773d124de39958e0b50282115c341dc247 Mon Sep 17 00:00:00 2001 From: marcoderama Date: Fri, 26 May 2023 16:39:55 -0700 Subject: [PATCH 162/632] Update functions.md Fix small typo Signed-off-by: marcoderama --- docs/querying/functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index c8831c079..e1a0b4a76 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -359,7 +359,7 @@ increase(http_requests_total{job="api-server"}[5m]) ``` `increase` acts on native histograms by calculating a new histogram where each -compononent (sum and count of observations, buckets) is the increase between +component (sum and count of observations, buckets) is the increase between the respective component in the first and last native histogram 
in `v`. However, each element in `v` that contains a mix of float and native histogram samples within the range, will be missing from the result vector. From 4c27fa0c1d718894a1a584f75f18a881950c93a3 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Sun, 28 May 2023 15:00:48 +0200 Subject: [PATCH 163/632] Update OpenTelemetry dependencies Signed-off-by: Julien Pivotto --- go.mod | 42 ++++++++++++++++-------------- go.sum | 82 +++++++++++++++++++++++++++++++--------------------------- 2 files changed, 66 insertions(+), 58 deletions(-) diff --git a/go.mod b/go.mod index c90285438..998c173cd 100644 --- a/go.mod +++ b/go.mod @@ -49,27 +49,27 @@ require ( github.com/prometheus/exporter-toolkit v0.9.1 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 - github.com/stretchr/testify v1.8.2 + github.com/stretchr/testify v1.8.3 github.com/vultr/govultr/v2 v2.17.2 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 - go.opentelemetry.io/otel v1.14.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0 - go.opentelemetry.io/otel/sdk v1.14.0 - go.opentelemetry.io/otel/trace v1.14.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 + go.opentelemetry.io/otel v1.16.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0 + go.opentelemetry.io/otel/sdk v1.16.0 + go.opentelemetry.io/otel/trace v1.16.0 go.uber.org/atomic v1.10.0 go.uber.org/automaxprocs v1.5.2 go.uber.org/goleak v1.2.1 - golang.org/x/net v0.9.0 + golang.org/x/net v0.10.0 golang.org/x/oauth2 v0.7.0 golang.org/x/sync v0.1.0 - golang.org/x/sys v0.7.0 + golang.org/x/sys v0.8.0 golang.org/x/time v0.3.0 
golang.org/x/tools v0.8.0 google.golang.org/api v0.114.0 - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 - google.golang.org/grpc v1.53.0 + google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e + google.golang.org/grpc v1.55.0 google.golang.org/protobuf v1.30.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -87,10 +87,12 @@ require ( github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect + google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e // indirect ) require ( - cloud.google.com/go/compute v1.18.0 // indirect + cloud.google.com/go/compute v1.19.0 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect @@ -101,8 +103,8 @@ require ( github.com/armon/go-metrics v0.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.2.0 // indirect - github.com/cncf/xds/go v0.0.0-20230112175826-46e39c7b9b43 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/docker/distribution v2.8.1+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect @@ -113,7 +115,7 @@ require ( github.com/felixge/httpsnoop v1.0.3 // indirect github.com/ghodss/yaml v1.0.0 // indirect github.com/go-kit/kit v0.12.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/analysis v0.21.4 // indirect github.com/go-openapi/errors v0.20.3 // 
indirect @@ -125,7 +127,7 @@ require ( github.com/go-openapi/validate v0.22.1 // indirect github.com/go-resty/resty/v2 v2.7.0 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang/glog v1.0.0 // indirect + github.com/golang/glog v1.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/gnostic v0.6.9 // indirect @@ -168,13 +170,13 @@ require ( github.com/spf13/pflag v1.0.5 // indirect go.mongodb.org/mongo-driver v1.11.3 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 // indirect - go.opentelemetry.io/otel/metric v0.37.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect + go.opentelemetry.io/otel/metric v1.16.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect golang.org/x/crypto v0.7.0 // indirect golang.org/x/exp v0.0.0-20230321023759-10a507213a29 golang.org/x/mod v0.10.0 // indirect - golang.org/x/term v0.7.0 // indirect + golang.org/x/term v0.8.0 // indirect golang.org/x/text v0.9.0 // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 9ecc5c727..545c85a9e 100644 --- a/go.sum +++ b/go.sum @@ -19,8 +19,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= +cloud.google.com/go/compute 
v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= @@ -109,8 +109,8 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= -github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -130,8 +130,8 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230112175826-46e39c7b9b43 h1:XP+uhjN0yBCN/tPkr8Z0BNDc5rZam9RG6UWyf2FrSQ0= -github.com/cncf/xds/go v0.0.0-20230112175826-46e39c7b9b43/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLWvQsR9CzAKt2e+EWV2yX9oXQ4= +github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -216,8 +216,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= @@ -297,8 +297,9 @@ github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 
h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -737,8 +738,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -780,24 +782,24 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod 
h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 h1:lE9EJyw3/JhrjWH/hEy9FptnalDQgj7vpbgC2KCCCxE= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0/go.mod h1:pcQ3MM3SWvrA71U4GDqv9UFDJ3HQsW7y5ZO3tDTlUdI= -go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= -go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 h1:/fXHZHGvro6MVqV34fJzDhi7sHGpX3Ej/Qjmfn003ho= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0/go.mod h1:UFG7EBMRdXyFstOwH028U0sVf+AvukSGhF0g8+dmNG8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 h1:TKf2uAs2ueguzLaxOCBXNpHxfO/aC7PAdDsSH0IbeRQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0/go.mod h1:HrbCVv40OOLTABmOn1ZWty6CHXkU8DK/Urc43tHug70= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 h1:ap+y8RXX3Mu9apKVtOkM6WSFESLM8K3wNQyOU8sWHcc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0/go.mod h1:5w41DY6S9gZrbjuq6Y+753e96WfPha5IcsOSZTtullM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0 h1:3jAYbRHQAqzLjd9I4tzxwJ8Pk/N6AqBcF6m1ZHrxG94= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0/go.mod h1:+N7zNjIJv4K+DeX67XXET0P+eIciESgaFDBqh+ZJFS4= -go.opentelemetry.io/otel/metric v0.37.0 h1:pHDQuLQOZwYD+Km0eb657A25NaRzy0a+eLyKfDXedEs= -go.opentelemetry.io/otel/metric v0.37.0/go.mod h1:DmdaHfGt54iV6UKxsV9slj2bBRJcKC1B1uvDLIioc1s= -go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= -go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM= -go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= -go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= 
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0/go.mod h1:XiYsayHc36K3EByOO6nbAXnAWbrUxdjUROCEeeROOH8= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= +go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 h1:t4ZwRPU+emrcvM2e9DHd0Fsf0JTPVcbfa/BhTDF03d0= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0/go.mod h1:vLarbg68dH2Wa77g71zmKQqlQ8+8Rq3GRG31uc0WcWI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 h1:cbsD4cUcviQGXdw8+bo5x2wazq10SKz8hEbtCRPcU78= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0/go.mod h1:JgXSGah17croqhJfhByOLVY719k1emAXC8MVhCIJlRs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 h1:TVQp/bboR4mhZSav+MdgXB8FaRho1RC8UwVn3T0vjVc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0/go.mod h1:I33vtIe0sR96wfrUcilIzLoA3mLHhRmz9S9Te0S3gDo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0 h1:iqjq9LAB8aK++sKVcELezzn655JnBNdsDhghU4G/So8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0/go.mod h1:hGXzO5bhhSHZnKvrDaXB82Y9DRFour0Nz/KrBh7reWw= +go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE= +go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= +go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= go.opentelemetry.io/proto/otlp v0.7.0/go.mod 
h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -914,8 +916,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1006,14 +1008,14 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1148,8 +1150,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e h1:Ao9GzfUMPH3zjVfzXG5rlWlk+Q8MXWKwWpwVQE1MXfw= +google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e h1:AZX1ra8YbFMSb7+1pI8S9v4rrgRR7jU1FmuFSSjTVcQ= 
+google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e h1:NumxXLPfHSndr3wBBdeKiVHjGVFzi9RX2HwwQke94iY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1171,8 +1177,8 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= +google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 4341b98eb2f4dc3d41d5afa9726feef7ea35e3c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Sun, 28 May 2023 19:55:00 +0200 Subject: [PATCH 164/632] fix: apply suggested changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 
Signed-off-by: François Gouteroux --- cmd/promtool/main.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index b3d743e44..c55e5be1e 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -131,7 +131,7 @@ func main() { checkRulesCmd := checkCmd.Command("rules", "Check if the rule files are valid or not.") ruleFiles := checkRulesCmd.Arg( "rule-files", - "The rule files to check, default is read from standard input (STDIN).", + "The rule files to check, default is read from standard input.", ).ExistingFiles() checkRulesLint := checkRulesCmd.Flag( "lint", @@ -690,7 +690,7 @@ func CheckRules(ls lintConfig, files ...string) int { failed := false hasErrors := false - // add empty string to avoid matching filename + // Add empty string to avoid matching filename. if len(files) == 0 { files = append(files, "") } @@ -723,7 +723,7 @@ func checkRules(filename string, lintSettings lintConfig) (int, []error) { var rgs *rulefmt.RuleGroups var errs []error - // if filename is an empty string it is a stdin + // Empty string is stdin input. if filename == "" { data, err := io.ReadAll(os.Stdin) if err != nil { From 9cf1b4a2e635f6a959118fd5a19e3cba7ed54308 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Sun, 28 May 2023 22:31:57 +0200 Subject: [PATCH 165/632] fix: update doc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- docs/command-line/promtool.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 59c46dd79..6784fb99f 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -181,7 +181,7 @@ Check if the rule files are valid or not. | Argument | Description | | --- | --- | -| rule-files | The rule files to check, default is read from standard input (STDIN). 
| +| rule-files | The rule files to check, default is read from standard input. | From 6e7ac76981a53f6727da04f3a360494dc6696fb0 Mon Sep 17 00:00:00 2001 From: cui fliter Date: Mon, 29 May 2023 16:26:11 +0800 Subject: [PATCH 166/632] fix problematic link (#12405) Signed-off-by: cui fliter --- web/ui/module/codemirror-promql/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/ui/module/codemirror-promql/README.md b/web/ui/module/codemirror-promql/README.md index 441a81315..627e4fe15 100644 --- a/web/ui/module/codemirror-promql/README.md +++ b/web/ui/module/codemirror-promql/README.md @@ -1,7 +1,7 @@ CodeMirror-promql ================= -This project provides a mode for [CodeMirror Next](https://codemirror.net/6) that handles syntax highlighting, linting +This project provides a mode for [CodeMirror](https://codemirror.net/6/) that handles syntax highlighting, linting and autocompletion for PromQL ([Prometheus Query Language](https://prometheus.io/docs/introduction/overview/)). ![preview](https://user-images.githubusercontent.com/4548045/95660829-d5e4b680-0b2a-11eb-9ecb-41dca6396273.gif) @@ -15,7 +15,7 @@ npm install --save @prometheus-io/codemirror-promql ``` **Note:** You will have to manually install different packages that are part -of [CodeMirror Next](https://codemirror.net/6), as they are a peer dependency to this package. Here are the different +of [CodeMirror](https://codemirror.net/6/), as they are a peer dependency to this package. Here are the different packages you need to install: * **@codemirror/autocomplete** From 044e004a81157c681ca1c9512d1101bb05c6ff88 Mon Sep 17 00:00:00 2001 From: SuperQ Date: Tue, 30 May 2023 09:08:00 +0200 Subject: [PATCH 167/632] Update exporter-toolkit Adds web config option `client_allowed_sans`. This enables Prometheus to limit the Subject Alternate Name (SAN) allowed to connect. 
Signed-off-by: SuperQ --- docs/configuration/https.md | 7 +++++++ go.mod | 4 ++-- go.sum | 8 ++++---- 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/docs/configuration/https.md b/docs/configuration/https.md index d31aca7b4..bc83e07a3 100644 --- a/docs/configuration/https.md +++ b/docs/configuration/https.md @@ -44,6 +44,13 @@ tls_server_config: # CA certificate for client certificate authentication to the server. [ client_ca_file: ] + # Verify that the client certificate has a Subject Alternate Name (SAN) + # which is an exact match to an entry in this list, else terminate the + # connection. SAN match can be one or multiple of the following: DNS, + # IP, e-mail, or URI address from https://pkg.go.dev/crypto/x509#Certificate. + [ client_allowed_sans: + [ - ] ] + # Minimum TLS version that is acceptable. [ min_version: | default = "TLS12" ] diff --git a/go.mod b/go.mod index 998c173cd..15daecc2c 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,7 @@ require ( github.com/prometheus/common v0.42.0 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 - github.com/prometheus/exporter-toolkit v0.9.1 + github.com/prometheus/exporter-toolkit v0.10.0 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 github.com/stretchr/testify v1.8.3 @@ -173,7 +173,7 @@ require ( go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect go.opentelemetry.io/otel/metric v1.16.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect - golang.org/x/crypto v0.7.0 // indirect + golang.org/x/crypto v0.8.0 // indirect golang.org/x/exp v0.0.0-20230321023759-10a507213a29 golang.org/x/mod v0.10.0 // indirect golang.org/x/term v0.8.0 // indirect diff --git a/go.sum b/go.sum index 545c85a9e..18d98dfae 100644 --- a/go.sum +++ b/go.sum @@ -671,8 +671,8 @@ github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/ github.com/prometheus/common/assets 
v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= -github.com/prometheus/exporter-toolkit v0.9.1 h1:cNkC01riqiOS+kh3zdnNwRsbe/Blh0WwK3ij5rPJ9Sw= -github.com/prometheus/exporter-toolkit v0.9.1/go.mod h1:iFlTmFISCix0vyuyBmm0UqOUCTao9+RsAsKJP3YM9ec= +github.com/prometheus/exporter-toolkit v0.10.0 h1:yOAzZTi4M22ZzVxD+fhy1URTuNRj/36uQJJ5S8IPza8= +github.com/prometheus/exporter-toolkit v0.10.0/go.mod h1:+sVFzuvV5JDyw+Ih6p3zFxZNVnKQa3x5qPmDSiPu4ZY= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -832,8 +832,8 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod 
h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= From a16b876a05faef651f6fd4f3dd71a32a996ba367 Mon Sep 17 00:00:00 2001 From: Arianna Vespri <36129782+vesari@users.noreply.github.com> Date: Tue, 30 May 2023 10:22:23 +0200 Subject: [PATCH 168/632] Add limits to global config (#12126) * Add limits to global config Signed-off-by: Arianna Vespri * Move changes into Validate func Signed-off-by: Arianna Vespri * Make comments consistent wrt 0 meaning no limit Signed-off-by: Arianna Vespri * Document global limits Signed-off-by: Arianna Vespri --------- Signed-off-by: Arianna Vespri --- config/config.go | 61 ++++- config/config_test.go | 399 +++++++++++++++++++++------- config/testdata/conf.good.yml | 11 + docs/configuration/configuration.md | 33 +++ 4 files changed, 394 insertions(+), 110 deletions(-) diff --git a/config/config.go b/config/config.go index d0ba03ab2..9f81bbfd5 100644 --- a/config/config.go +++ b/config/config.go @@ -267,7 +267,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) { for i, scfg := range c.ScrapeConfigs { // We do these checks for library users that would not call Validate in // Unmarshal. - if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil { + if err := scfg.Validate(c.GlobalConfig); err != nil { return nil, err } @@ -294,7 +294,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) { return nil, fileErr(filename, err) } for _, scfg := range cfg.ScrapeConfigs { - if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil { + if err := scfg.Validate(c.GlobalConfig); err != nil { return nil, fileErr(filename, err) } @@ -343,7 +343,7 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { // Do global overrides and validate unique names. 
jobNames := map[string]struct{}{} for _, scfg := range c.ScrapeConfigs { - if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil { + if err := scfg.Validate(c.GlobalConfig); err != nil { return err } @@ -390,6 +390,24 @@ type GlobalConfig struct { QueryLogFile string `yaml:"query_log_file,omitempty"` // The labels to add to any timeseries that this Prometheus instance scrapes. ExternalLabels labels.Labels `yaml:"external_labels,omitempty"` + // An uncompressed response body larger than this many bytes will cause the + // scrape to fail. 0 means no limit. + BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"` + // More than this many samples post metric-relabeling will cause the scrape to + // fail. 0 means no limit. + SampleLimit uint `yaml:"sample_limit,omitempty"` + // More than this many targets after the target relabeling will cause the + // scrapes to fail. 0 means no limit. + TargetLimit uint `yaml:"target_limit,omitempty"` + // More than this many labels post metric-relabeling will cause the scrape to + // fail. 0 means no limit. + LabelLimit uint `yaml:"label_limit,omitempty"` + // More than this label name length post metric-relabeling will cause the + // scrape to fail. 0 means no limit. + LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"` + // More than this label value length post metric-relabeling will cause the + // scrape to fail. 0 means no limit. + LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"` } // SetDirectory joins any relative file paths with dir. @@ -478,19 +496,19 @@ type ScrapeConfig struct { // scrape to fail. 0 means no limit. BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"` // More than this many samples post metric-relabeling will cause the scrape to - // fail. + // fail. 0 means no limit. 
SampleLimit uint `yaml:"sample_limit,omitempty"` // More than this many targets after the target relabeling will cause the - // scrapes to fail. + // scrapes to fail. 0 means no limit. TargetLimit uint `yaml:"target_limit,omitempty"` // More than this many labels post metric-relabeling will cause the scrape to - // fail. + // fail. 0 means no limit. LabelLimit uint `yaml:"label_limit,omitempty"` // More than this label name length post metric-relabeling will cause the - // scrape to fail. + // scrape to fail. 0 means no limit. LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"` // More than this label value length post metric-relabeling will cause the - // scrape to fail. + // scrape to fail. 0 means no limit. LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"` // More than this many buckets in a native histogram will cause the scrape to // fail. @@ -552,25 +570,44 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return nil } -func (c *ScrapeConfig) Validate(defaultInterval, defaultTimeout model.Duration) error { +func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { if c == nil { return errors.New("empty or null scrape config section") } // First set the correct scrape interval, then check that the timeout // (inferred or explicit) is not greater than that. 
if c.ScrapeInterval == 0 { - c.ScrapeInterval = defaultInterval + c.ScrapeInterval = globalConfig.ScrapeInterval } if c.ScrapeTimeout > c.ScrapeInterval { return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", c.JobName) } if c.ScrapeTimeout == 0 { - if defaultTimeout > c.ScrapeInterval { + if globalConfig.ScrapeTimeout > c.ScrapeInterval { c.ScrapeTimeout = c.ScrapeInterval } else { - c.ScrapeTimeout = defaultTimeout + c.ScrapeTimeout = globalConfig.ScrapeTimeout } } + if c.BodySizeLimit == 0 { + c.BodySizeLimit = globalConfig.BodySizeLimit + } + if c.SampleLimit == 0 { + c.SampleLimit = globalConfig.SampleLimit + } + if c.TargetLimit == 0 { + c.TargetLimit = globalConfig.TargetLimit + } + if c.LabelLimit == 0 { + c.LabelLimit = globalConfig.LabelLimit + } + if c.LabelNameLengthLimit == 0 { + c.LabelNameLengthLimit = globalConfig.LabelNameLengthLimit + } + if c.LabelValueLengthLimit == 0 { + c.LabelValueLengthLimit = globalConfig.LabelValueLengthLimit + } + return nil } diff --git a/config/config_test.go b/config/config_test.go index bde09dfec..d243d687c 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -68,6 +68,15 @@ func mustParseURL(u string) *config.URL { return &config.URL{URL: parsed} } +const ( + globBodySizeLimit = 15 * units.MiB + globSampleLimit = 1500 + globTargetLimit = 30 + globLabelLimit = 30 + globLabelNameLengthLimit = 200 + globLabelValueLengthLimit = 200 +) + var expectedConf = &Config{ GlobalConfig: GlobalConfig{ ScrapeInterval: model.Duration(15 * time.Second), @@ -76,6 +85,13 @@ var expectedConf = &Config{ QueryLogFile: "", ExternalLabels: labels.FromStrings("foo", "bar", "monitor", "codelab"), + + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, }, RuleFiles: []string{ @@ -165,10 +181,16 @@ var 
expectedConf = &Config{ { JobName: "prometheus", - HonorLabels: true, - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorLabels: true, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -261,11 +283,15 @@ var expectedConf = &Config{ { JobName: "service-x", - HonorTimestamps: true, - ScrapeInterval: model.Duration(50 * time.Second), - ScrapeTimeout: model.Duration(5 * time.Second), - BodySizeLimit: 10 * units.MiB, - SampleLimit: 1000, + HonorTimestamps: true, + ScrapeInterval: model.Duration(50 * time.Second), + ScrapeTimeout: model.Duration(5 * time.Second), + BodySizeLimit: 10 * units.MiB, + SampleLimit: 1000, + TargetLimit: 35, + LabelLimit: 35, + LabelNameLengthLimit: 210, + LabelValueLengthLimit: 210, HTTPClientConfig: config.HTTPClientConfig{ BasicAuth: &config.BasicAuth{ @@ -352,9 +378,15 @@ var expectedConf = &Config{ { JobName: "service-y", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -399,9 +431,15 @@ var expectedConf = &Config{ { JobName: "service-z", - 
HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: model.Duration(10 * time.Second), + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: model.Duration(10 * time.Second), + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: "/metrics", Scheme: "http", @@ -424,9 +462,15 @@ var expectedConf = &Config{ { JobName: "service-kubernetes", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -455,9 +499,15 @@ var expectedConf = &Config{ { JobName: "service-kubernetes-namespaces", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -486,9 +536,15 @@ var expectedConf = &Config{ { JobName: "service-kuma", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), 
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -506,9 +562,15 @@ var expectedConf = &Config{ { JobName: "service-marathon", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -535,9 +597,15 @@ var expectedConf = &Config{ { JobName: "service-nomad", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -561,9 +629,15 @@ var expectedConf = &Config{ { JobName: "service-ec2", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, 
+ HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -594,9 +668,15 @@ var expectedConf = &Config{ { JobName: "service-lightsail", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -617,9 +697,15 @@ var expectedConf = &Config{ { JobName: "service-azure", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -643,9 +729,15 @@ var expectedConf = &Config{ { JobName: "service-nerve", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: 
model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -662,9 +754,15 @@ var expectedConf = &Config{ { JobName: "0123service-xxx", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -684,9 +782,15 @@ var expectedConf = &Config{ { JobName: "badfederation", - HonorTimestamps: false, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: false, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: "/federate", Scheme: DefaultScrapeConfig.Scheme, @@ -706,9 +810,15 @@ var expectedConf = &Config{ { JobName: "測試", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: 
DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -728,9 +838,15 @@ var expectedConf = &Config{ { JobName: "httpsd", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -747,9 +863,15 @@ var expectedConf = &Config{ { JobName: "service-triton", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -774,9 +896,15 @@ var expectedConf = &Config{ { JobName: "digitalocean-droplets", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + 
BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -800,9 +928,15 @@ var expectedConf = &Config{ { JobName: "docker", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -822,9 +956,15 @@ var expectedConf = &Config{ { JobName: "dockerswarm", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -844,9 +984,15 @@ var expectedConf = &Config{ { JobName: "service-openstack", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: 
globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -870,9 +1016,15 @@ var expectedConf = &Config{ { JobName: "service-puppetdb", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -898,10 +1050,16 @@ var expectedConf = &Config{ }, }, { - JobName: "hetzner", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + JobName: "hetzner", + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -947,9 +1105,15 @@ var expectedConf = &Config{ { JobName: "service-eureka", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + 
TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -966,9 +1130,16 @@ var expectedConf = &Config{ { JobName: "ovhcloud", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + HTTPClientConfig: config.DefaultHTTPClientConfig, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -995,9 +1166,16 @@ var expectedConf = &Config{ { JobName: "scaleway", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + HTTPClientConfig: config.DefaultHTTPClientConfig, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1030,9 +1208,15 @@ var expectedConf = &Config{ { JobName: "linode-instances", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: 
globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1057,9 +1241,16 @@ var expectedConf = &Config{ { JobName: "uyuni", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + HTTPClientConfig: config.DefaultHTTPClientConfig, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1076,10 +1267,16 @@ var expectedConf = &Config{ }, }, { - JobName: "ionos", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + JobName: "ionos", + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1101,9 +1298,15 @@ var expectedConf = &Config{ { JobName: "vultr", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + 
BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index 388b9de32..19cfe1eb5 100644 --- a/config/testdata/conf.good.yml +++ b/config/testdata/conf.good.yml @@ -2,6 +2,12 @@ global: scrape_interval: 15s evaluation_interval: 30s + body_size_limit: 15MB + sample_limit: 1500 + target_limit: 30 + label_limit: 30 + label_name_length_limit: 200 + label_value_length_limit: 200 # scrape_timeout is set to the global default (10s). external_labels: @@ -111,6 +117,11 @@ scrape_configs: body_size_limit: 10MB sample_limit: 1000 + target_limit: 35 + label_limit: 35 + label_name_length_limit: 210 + label_value_length_limit: 210 + metrics_path: /my_path scheme: https diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index b0b587e02..ff1449e34 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -73,6 +73,39 @@ global: # Reloading the configuration will reopen the file. [ query_log_file: ] + # An uncompressed response body larger than this many bytes will cause the + # scrape to fail. 0 means no limit. Example: 100MB. + # This is an experimental feature, this behaviour could + # change or be removed in the future. + [ body_size_limit: | default = 0 ] + + # Per-scrape limit on number of scraped samples that will be accepted. + # If more than this number of samples are present after metric relabeling + # the entire scrape will be treated as failed. 0 means no limit. + [ sample_limit: | default = 0 ] + + # Per-scrape limit on number of labels that will be accepted for a sample. 
If + # more than this number of labels are present post metric-relabeling, the + # entire scrape will be treated as failed. 0 means no limit. + [ label_limit: | default = 0 ] + + # Per-scrape limit on length of labels name that will be accepted for a sample. + # If a label name is longer than this number post metric-relabeling, the entire + # scrape will be treated as failed. 0 means no limit. + [ label_name_length_limit: | default = 0 ] + + # Per-scrape limit on length of labels value that will be accepted for a sample. + # If a label value is longer than this number post metric-relabeling, the + # entire scrape will be treated as failed. 0 means no limit. + [ label_value_length_limit: | default = 0 ] + + # Per-scrape config limit on number of unique targets that will be + # accepted. If more than this number of targets are present after target + # relabeling, Prometheus will mark the targets as failed without scraping them. + # 0 means no limit. This is an experimental feature, this behaviour could + # change in the future. + [ target_limit: | default = 0 ] + # Rule files specifies a list of globs. Rules and alerts are read from # all matching files. 
rule_files: From 73078bf73870be23142d3c6703d52b5da3279d17 Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Tue, 30 May 2023 04:49:22 -0700 Subject: [PATCH 169/632] Opmizing Group Regex (#12375) Signed-off-by: Alan Protasio --- tsdb/querier.go | 9 ++++++++- tsdb/querier_bench_test.go | 4 ++++ tsdb/querier_test.go | 21 +++++++++++++++++++++ 3 files changed, 33 insertions(+), 1 deletion(-) diff --git a/tsdb/querier.go b/tsdb/querier.go index 9baf3f242..72b6b5141 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -190,7 +190,14 @@ func findSetMatches(pattern string) []string { } escaped := false sets := []*strings.Builder{{}} - for i := 4; i < len(pattern)-2; i++ { + init := 4 + end := len(pattern) - 2 + // If the regex is wrapped in a group we can remove the first and last parentheses + if pattern[init] == '(' && pattern[end-1] == ')' { + init++ + end-- + } + for i := init; i < end; i++ { if escaped { switch { case isRegexMetaCharacter(pattern[i]): diff --git a/tsdb/querier_bench_test.go b/tsdb/querier_bench_test.go index 19619e35b..c6deaeb44 100644 --- a/tsdb/querier_bench_test.go +++ b/tsdb/querier_bench_test.go @@ -113,6 +113,7 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) { jXplus := labels.MustNewMatcher(labels.MatchRegexp, "j", "X.+") iCharSet := labels.MustNewMatcher(labels.MatchRegexp, "i", "1[0-9]") iAlternate := labels.MustNewMatcher(labels.MatchRegexp, "i", "(1|2|3|4|5|6|20|55)") + iNotAlternate := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "(1|2|3|4|5|6|20|55)") iXYZ := labels.MustNewMatcher(labels.MatchRegexp, "i", "X|Y|Z") iNotXYZ := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "X|Y|Z") cases := []struct { @@ -132,6 +133,7 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) { {`j=~"XXX|YYY"`, []*labels.Matcher{jXXXYYY}}, {`j=~"X.+"`, []*labels.Matcher{jXplus}}, {`i=~"(1|2|3|4|5|6|20|55)"`, []*labels.Matcher{iAlternate}}, + {`i!~"(1|2|3|4|5|6|20|55)"`, []*labels.Matcher{iNotAlternate}}, {`i=~"X|Y|Z"`, 
[]*labels.Matcher{iXYZ}}, {`i!~"X|Y|Z"`, []*labels.Matcher{iNotXYZ}}, {`i=~".*"`, []*labels.Matcher{iStar}}, @@ -161,6 +163,8 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) { for _, c := range cases { b.Run(c.name, func(b *testing.B) { + b.ReportAllocs() + b.ResetTimer() for i := 0; i < b.N; i++ { _, err := PostingsForMatchers(ir, c.matchers...) require.NoError(b, err) diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 5bf721a62..e9dd3b75f 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -2051,6 +2051,12 @@ func TestPostingsForMatchers(t *testing.T) { labels.FromStrings("n", "2"), }, }, + { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "n", "(1|2.5)")}, + exp: []labels.Labels{ + labels.FromStrings("n", "2"), + }, + }, { matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^a$")}, exp: []labels.Labels{ @@ -2112,6 +2118,13 @@ func TestPostingsForMatchers(t *testing.T) { labels.FromStrings("n", "1", "i", "b"), }, }, + { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "i", "(a|b)")}, + exp: []labels.Labels{ + labels.FromStrings("n", "1", "i", "a"), + labels.FromStrings("n", "1", "i", "b"), + }, + }, { matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", "x1|2")}, exp: []labels.Labels{ @@ -2134,6 +2147,14 @@ func TestPostingsForMatchers(t *testing.T) { labels.FromStrings("n", "2.5"), }, }, + { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "i", "(c||d)")}, + exp: []labels.Labels{ + labels.FromStrings("n", "1"), + labels.FromStrings("n", "2"), + labels.FromStrings("n", "2.5"), + }, + }, } ir, err := h.Index() From 866fa25b2025a81b31d80ab04aacfb07196cca80 Mon Sep 17 00:00:00 2001 From: "renzheng.wang" Date: Tue, 30 May 2023 20:13:00 +0800 Subject: [PATCH 170/632] add label and labelpresent for endpointslice role in k8s discovery Signed-off-by: 
renzheng.wang --- discovery/kubernetes/endpoints.go | 24 +--- discovery/kubernetes/endpointslice.go | 4 +- discovery/kubernetes/endpointslice_adaptor.go | 10 ++ discovery/kubernetes/endpointslice_test.go | 122 +++++++++++------- discovery/kubernetes/ingress.go | 28 +--- discovery/kubernetes/ingress_adaptor.go | 25 ++-- discovery/kubernetes/kubernetes.go | 17 +++ discovery/kubernetes/node.go | 23 +--- discovery/kubernetes/pod.go | 21 +-- discovery/kubernetes/service.go | 18 +-- 10 files changed, 134 insertions(+), 158 deletions(-) diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index 2413dab45..e0d5536d1 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -29,7 +29,6 @@ import ( "k8s.io/client-go/util/workqueue" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/util/strutil" ) var ( @@ -248,9 +247,6 @@ func endpointsSourceFromNamespaceAndName(namespace, name string) string { } const ( - endpointsLabelPrefix = metaLabelPrefix + "endpoints_label_" - endpointsLabelPresentPrefix = metaLabelPrefix + "endpoints_labelpresent_" - endpointsNameLabel = metaLabelPrefix + "endpoints_name" endpointNodeName = metaLabelPrefix + "endpoint_node_name" endpointHostname = metaLabelPrefix + "endpoint_hostname" endpointReadyLabel = metaLabelPrefix + "endpoint_ready" @@ -265,16 +261,11 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { Source: endpointsSource(eps), } tg.Labels = model.LabelSet{ - namespaceLabel: lv(eps.Namespace), - endpointsNameLabel: lv(eps.Name), + namespaceLabel: lv(eps.Namespace), } e.addServiceLabels(eps.Namespace, eps.Name, tg) // Add endpoints labels metadata. 
- for k, v := range eps.Labels { - ln := strutil.SanitizeLabelName(k) - tg.Labels[model.LabelName(endpointsLabelPrefix+ln)] = lv(v) - tg.Labels[model.LabelName(endpointsLabelPresentPrefix+ln)] = presentValue - } + addObjectMetaLabels(tg.Labels, eps.ObjectMeta, RoleEndpoint) type podEntry struct { pod *apiv1.Pod @@ -462,14 +453,7 @@ func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger log.L node := obj.(*apiv1.Node) // Allocate one target label for the node name, - // and two target labels for each node label. - nodeLabelset := make(model.LabelSet, 1+2*len(node.GetLabels())) - nodeLabelset[nodeNameLabel] = lv(*nodeName) - for k, v := range node.GetLabels() { - ln := strutil.SanitizeLabelName(k) - nodeLabelset[model.LabelName(nodeLabelPrefix+ln)] = lv(v) - nodeLabelset[model.LabelName(nodeLabelPresentPrefix+ln)] = presentValue - } - + nodeLabelset := make(model.LabelSet) + addObjectMetaLabels(nodeLabelset, node.ObjectMeta, RoleNode) return tg.Merge(nodeLabelset) } diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index c7df64252..ed23a95b3 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -274,9 +274,11 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou } tg.Labels = model.LabelSet{ namespaceLabel: lv(eps.namespace()), - endpointSliceNameLabel: lv(eps.name()), endpointSliceAddressTypeLabel: lv(eps.addressType()), } + + addObjectMetaLabels(tg.Labels, eps.getObjectMeta(), RoleEndpointSlice) + e.addServiceLabels(eps, tg) type podEntry struct { diff --git a/discovery/kubernetes/endpointslice_adaptor.go b/discovery/kubernetes/endpointslice_adaptor.go index 5a21f1b89..46fa708c1 100644 --- a/discovery/kubernetes/endpointslice_adaptor.go +++ b/discovery/kubernetes/endpointslice_adaptor.go @@ -17,11 +17,13 @@ import ( corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/discovery/v1" "k8s.io/api/discovery/v1beta1" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" ) // endpointSliceAdaptor is an adaptor for the different EndpointSlice versions type endpointSliceAdaptor interface { get() interface{} + getObjectMeta() metav1.ObjectMeta name() string namespace() string addressType() string @@ -66,6 +68,10 @@ func (e *endpointSliceAdaptorV1) get() interface{} { return e.endpointSlice } +func (e *endpointSliceAdaptorV1) getObjectMeta() metav1.ObjectMeta { + return e.endpointSlice.ObjectMeta +} + func (e *endpointSliceAdaptorV1) name() string { return e.endpointSlice.ObjectMeta.Name } @@ -115,6 +121,10 @@ func (e *endpointSliceAdaptorV1Beta1) get() interface{} { return e.endpointSlice } +func (e *endpointSliceAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta { + return e.endpointSlice.ObjectMeta +} + func (e *endpointSliceAdaptorV1Beta1) name() string { return e.endpointSlice.Name } diff --git a/discovery/kubernetes/endpointslice_test.go b/discovery/kubernetes/endpointslice_test.go index 8104e3db3..42e64a056 100644 --- a/discovery/kubernetes/endpointslice_test.go +++ b/discovery/kubernetes/endpointslice_test.go @@ -219,9 +219,11 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", }, Source: "endpointslice/default/testendpoints", }, @@ -280,9 +282,11 @@ func TestEndpointSliceDiscoveryBeforeRunV1beta1(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_namespace": "default", - 
"__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", }, Source: "endpointslice/default/testendpoints", }, @@ -478,9 +482,11 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) { }, }, Labels: map[model.LabelName]model.LabelValue{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_namespace": "default", }, }, }, @@ -574,9 +580,11 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_namespace": "default", }, }, }, @@ -659,9 +667,11 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_namespace": "default", + 
"__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_namespace": "default", }, Source: "endpointslice/default/testendpoints", }, @@ -739,12 +749,14 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_service_label_app_name": "test", - "__meta_kubernetes_service_labelpresent_app_name": "true", - "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_service_label_app_name": "test", + "__meta_kubernetes_service_labelpresent_app_name": "true", + "__meta_kubernetes_service_name": "testendpoints", }, Source: "endpointslice/default/testendpoints", }, @@ -835,14 +847,16 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_service_label_app_name": "svc", - "__meta_kubernetes_service_label_component": "testing", - "__meta_kubernetes_service_labelpresent_app_name": "true", - "__meta_kubernetes_service_labelpresent_component": "true", - "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_address_type": 
"IPv4", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_service_label_app_name": "svc", + "__meta_kubernetes_service_label_component": "testing", + "__meta_kubernetes_service_labelpresent_app_name": "true", + "__meta_kubernetes_service_labelpresent_component": "true", + "__meta_kubernetes_service_name": "testendpoints", }, Source: "endpointslice/default/testendpoints", }, @@ -927,12 +941,14 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_service_label_app_name": "test", - "__meta_kubernetes_service_labelpresent_app_name": "true", - "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_service_label_app_name": "test", + "__meta_kubernetes_service_labelpresent_app_name": "true", + "__meta_kubernetes_service_name": "testendpoints", }, Source: "endpointslice/default/testendpoints", }, @@ -1023,12 +1039,14 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_service_label_app_name": "test", - 
"__meta_kubernetes_service_labelpresent_app_name": "true", - "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_service_label_app_name": "test", + "__meta_kubernetes_service_labelpresent_app_name": "true", + "__meta_kubernetes_service_name": "testendpoints", }, Source: "endpointslice/default/testendpoints", }, @@ -1159,12 +1177,14 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_namespace": "ns1", - "__meta_kubernetes_service_label_app": "app1", - "__meta_kubernetes_service_labelpresent_app": "true", - "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_namespace": "ns1", + "__meta_kubernetes_service_label_app": "app1", + "__meta_kubernetes_service_labelpresent_app": "true", + "__meta_kubernetes_service_name": "testendpoints", }, Source: "endpointslice/ns1/testendpoints", }, @@ -1303,9 +1323,11 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_namespace": "own-ns", + "__meta_kubernetes_endpointslice_address_type": "IPv4", + 
"__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_namespace": "own-ns", + "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", + "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", }, Source: "endpointslice/own-ns/testendpoints", }, diff --git a/discovery/kubernetes/ingress.go b/discovery/kubernetes/ingress.go index ad47c341a..697b6f519 100644 --- a/discovery/kubernetes/ingress.go +++ b/discovery/kubernetes/ingress.go @@ -28,7 +28,6 @@ import ( "k8s.io/client-go/util/workqueue" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/util/strutil" ) var ( @@ -143,37 +142,22 @@ func ingressSourceFromNamespaceAndName(namespace, name string) string { } const ( - ingressNameLabel = metaLabelPrefix + "ingress_name" - ingressLabelPrefix = metaLabelPrefix + "ingress_label_" - ingressLabelPresentPrefix = metaLabelPrefix + "ingress_labelpresent_" - ingressAnnotationPrefix = metaLabelPrefix + "ingress_annotation_" - ingressAnnotationPresentPrefix = metaLabelPrefix + "ingress_annotationpresent_" - ingressSchemeLabel = metaLabelPrefix + "ingress_scheme" - ingressHostLabel = metaLabelPrefix + "ingress_host" - ingressPathLabel = metaLabelPrefix + "ingress_path" - ingressClassNameLabel = metaLabelPrefix + "ingress_class_name" + ingressSchemeLabel = metaLabelPrefix + "ingress_scheme" + ingressHostLabel = metaLabelPrefix + "ingress_host" + ingressPathLabel = metaLabelPrefix + "ingress_path" + ingressClassNameLabel = metaLabelPrefix + "ingress_class_name" ) func ingressLabels(ingress ingressAdaptor) model.LabelSet { // Each label and annotation will create two key-value pairs in the map. 
- ls := make(model.LabelSet, 2*(len(ingress.labels())+len(ingress.annotations()))+2) - ls[ingressNameLabel] = lv(ingress.name()) + ls := make(model.LabelSet) ls[namespaceLabel] = lv(ingress.namespace()) if cls := ingress.ingressClassName(); cls != nil { ls[ingressClassNameLabel] = lv(*cls) } - for k, v := range ingress.labels() { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(ingressLabelPrefix+ln)] = lv(v) - ls[model.LabelName(ingressLabelPresentPrefix+ln)] = presentValue - } + addObjectMetaLabels(ls, ingress.getObjectMeta(), RoleIngress) - for k, v := range ingress.annotations() { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(ingressAnnotationPrefix+ln)] = lv(v) - ls[model.LabelName(ingressAnnotationPresentPrefix+ln)] = presentValue - } return ls } diff --git a/discovery/kubernetes/ingress_adaptor.go b/discovery/kubernetes/ingress_adaptor.go index 113a067ca..7be8538b5 100644 --- a/discovery/kubernetes/ingress_adaptor.go +++ b/discovery/kubernetes/ingress_adaptor.go @@ -16,10 +16,12 @@ package kubernetes import ( v1 "k8s.io/api/networking/v1" "k8s.io/api/networking/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // ingressAdaptor is an adaptor for the different Ingress versions type ingressAdaptor interface { + getObjectMeta() metav1.ObjectMeta name() string namespace() string labels() map[string]string @@ -43,11 +45,12 @@ func newIngressAdaptorFromV1(ingress *v1.Ingress) ingressAdaptor { return &ingressAdaptorV1{ingress: ingress} } -func (i *ingressAdaptorV1) name() string { return i.ingress.Name } -func (i *ingressAdaptorV1) namespace() string { return i.ingress.Namespace } -func (i *ingressAdaptorV1) labels() map[string]string { return i.ingress.Labels } -func (i *ingressAdaptorV1) annotations() map[string]string { return i.ingress.Annotations } -func (i *ingressAdaptorV1) ingressClassName() *string { return i.ingress.Spec.IngressClassName } +func (i *ingressAdaptorV1) getObjectMeta() metav1.ObjectMeta { return 
i.ingress.ObjectMeta } +func (i *ingressAdaptorV1) name() string { return i.ingress.Name } +func (i *ingressAdaptorV1) namespace() string { return i.ingress.Namespace } +func (i *ingressAdaptorV1) labels() map[string]string { return i.ingress.Labels } +func (i *ingressAdaptorV1) annotations() map[string]string { return i.ingress.Annotations } +func (i *ingressAdaptorV1) ingressClassName() *string { return i.ingress.Spec.IngressClassName } func (i *ingressAdaptorV1) tlsHosts() []string { var hosts []string @@ -95,12 +98,12 @@ type ingressAdaptorV1Beta1 struct { func newIngressAdaptorFromV1beta1(ingress *v1beta1.Ingress) ingressAdaptor { return &ingressAdaptorV1Beta1{ingress: ingress} } - -func (i *ingressAdaptorV1Beta1) name() string { return i.ingress.Name } -func (i *ingressAdaptorV1Beta1) namespace() string { return i.ingress.Namespace } -func (i *ingressAdaptorV1Beta1) labels() map[string]string { return i.ingress.Labels } -func (i *ingressAdaptorV1Beta1) annotations() map[string]string { return i.ingress.Annotations } -func (i *ingressAdaptorV1Beta1) ingressClassName() *string { return i.ingress.Spec.IngressClassName } +func (i *ingressAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta { return i.ingress.ObjectMeta } +func (i *ingressAdaptorV1Beta1) name() string { return i.ingress.Name } +func (i *ingressAdaptorV1Beta1) namespace() string { return i.ingress.Namespace } +func (i *ingressAdaptorV1Beta1) labels() map[string]string { return i.ingress.Labels } +func (i *ingressAdaptorV1Beta1) annotations() map[string]string { return i.ingress.Annotations } +func (i *ingressAdaptorV1Beta1) ingressClassName() *string { return i.ingress.Spec.IngressClassName } func (i *ingressAdaptorV1Beta1) tlsHosts() []string { var hosts []string diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go index e87a1c9b2..d45ac4193 100644 --- a/discovery/kubernetes/kubernetes.go +++ b/discovery/kubernetes/kubernetes.go @@ -17,6 +17,7 @@ import ( "context" 
"errors" "fmt" + "github.com/prometheus/prometheus/util/strutil" "os" "reflect" "strings" @@ -843,3 +844,19 @@ func checkDiscoveryV1Supported(client kubernetes.Interface) (bool, error) { // https://kubernetes.io/docs/reference/using-api/deprecation-guide/#v1-25 return semVer.Major() >= 1 && semVer.Minor() >= 21, nil } + +func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta, role Role) { + labelSet[model.LabelName(metaLabelPrefix+string(role)+"_name")] = lv(objectMeta.Name) + + for k, v := range objectMeta.Labels { + ln := strutil.SanitizeLabelName(k) + labelSet[model.LabelName(metaLabelPrefix+string(role)+"_label_"+ln)] = lv(v) + labelSet[model.LabelName(metaLabelPrefix+string(role)+"_labelpresent_"+ln)] = presentValue + } + + for k, v := range objectMeta.Annotations { + ln := strutil.SanitizeLabelName(k) + labelSet[model.LabelName(metaLabelPrefix+string(role)+"_annotation_"+ln)] = lv(v) + labelSet[model.LabelName(metaLabelPrefix+string(role)+"_annotationpresent_"+ln)] = presentValue + } +} diff --git a/discovery/kubernetes/node.go b/discovery/kubernetes/node.go index d0a6d2780..6a20e7b1f 100644 --- a/discovery/kubernetes/node.go +++ b/discovery/kubernetes/node.go @@ -152,33 +152,18 @@ func nodeSourceFromName(name string) string { } const ( - nodeNameLabel = metaLabelPrefix + "node_name" - nodeProviderIDLabel = metaLabelPrefix + "node_provider_id" - nodeLabelPrefix = metaLabelPrefix + "node_label_" - nodeLabelPresentPrefix = metaLabelPrefix + "node_labelpresent_" - nodeAnnotationPrefix = metaLabelPrefix + "node_annotation_" - nodeAnnotationPresentPrefix = metaLabelPrefix + "node_annotationpresent_" - nodeAddressPrefix = metaLabelPrefix + "node_address_" + nodeProviderIDLabel = metaLabelPrefix + "node_provider_id" + nodeAddressPrefix = metaLabelPrefix + "node_address_" ) func nodeLabels(n *apiv1.Node) model.LabelSet { // Each label and annotation will create two key-value pairs in the map. 
- ls := make(model.LabelSet, 2*(len(n.Labels)+len(n.Annotations))+1) + ls := make(model.LabelSet) - ls[nodeNameLabel] = lv(n.Name) ls[nodeProviderIDLabel] = lv(n.Spec.ProviderID) - for k, v := range n.Labels { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(nodeLabelPrefix+ln)] = lv(v) - ls[model.LabelName(nodeLabelPresentPrefix+ln)] = presentValue - } + addObjectMetaLabels(ls, n.ObjectMeta, RoleNode) - for k, v := range n.Annotations { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(nodeAnnotationPrefix+ln)] = lv(v) - ls[model.LabelName(nodeAnnotationPresentPrefix+ln)] = presentValue - } return ls } diff --git a/discovery/kubernetes/pod.go b/discovery/kubernetes/pod.go index 732cf52ad..74f74c1f7 100644 --- a/discovery/kubernetes/pod.go +++ b/discovery/kubernetes/pod.go @@ -30,7 +30,6 @@ import ( "k8s.io/client-go/util/workqueue" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/util/strutil" ) const nodeIndex = "node" @@ -180,7 +179,6 @@ func convertToPod(o interface{}) (*apiv1.Pod, error) { } const ( - podNameLabel = metaLabelPrefix + "pod_name" podIPLabel = metaLabelPrefix + "pod_ip" podContainerNameLabel = metaLabelPrefix + "pod_container_name" podContainerIDLabel = metaLabelPrefix + "pod_container_id" @@ -191,10 +189,6 @@ const ( podContainerIsInit = metaLabelPrefix + "pod_container_init" podReadyLabel = metaLabelPrefix + "pod_ready" podPhaseLabel = metaLabelPrefix + "pod_phase" - podLabelPrefix = metaLabelPrefix + "pod_label_" - podLabelPresentPrefix = metaLabelPrefix + "pod_labelpresent_" - podAnnotationPrefix = metaLabelPrefix + "pod_annotation_" - podAnnotationPresentPrefix = metaLabelPrefix + "pod_annotationpresent_" podNodeNameLabel = metaLabelPrefix + "pod_node_name" podHostIPLabel = metaLabelPrefix + "pod_host_ip" podUID = metaLabelPrefix + "pod_uid" @@ -215,7 +209,6 @@ func GetControllerOf(controllee metav1.Object) *metav1.OwnerReference { func podLabels(pod *apiv1.Pod) model.LabelSet { ls 
:= model.LabelSet{ - podNameLabel: lv(pod.ObjectMeta.Name), podIPLabel: lv(pod.Status.PodIP), podReadyLabel: podReady(pod), podPhaseLabel: lv(string(pod.Status.Phase)), @@ -224,6 +217,8 @@ func podLabels(pod *apiv1.Pod) model.LabelSet { podUID: lv(string(pod.ObjectMeta.UID)), } + addObjectMetaLabels(ls, pod.ObjectMeta, RolePod) + createdBy := GetControllerOf(pod) if createdBy != nil { if createdBy.Kind != "" { @@ -234,18 +229,6 @@ func podLabels(pod *apiv1.Pod) model.LabelSet { } } - for k, v := range pod.Labels { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(podLabelPrefix+ln)] = lv(v) - ls[model.LabelName(podLabelPresentPrefix+ln)] = presentValue - } - - for k, v := range pod.Annotations { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(podAnnotationPrefix+ln)] = lv(v) - ls[model.LabelName(podAnnotationPresentPrefix+ln)] = presentValue - } - return ls } diff --git a/discovery/kubernetes/service.go b/discovery/kubernetes/service.go index 40e17679e..96cac3365 100644 --- a/discovery/kubernetes/service.go +++ b/discovery/kubernetes/service.go @@ -28,7 +28,6 @@ import ( "k8s.io/client-go/util/workqueue" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/util/strutil" ) var ( @@ -162,23 +161,10 @@ const ( ) func serviceLabels(svc *apiv1.Service) model.LabelSet { - // Each label and annotation will create two key-value pairs in the map. 
- ls := make(model.LabelSet, 2*(len(svc.Labels)+len(svc.Annotations))+2) - - ls[serviceNameLabel] = lv(svc.Name) + ls := make(model.LabelSet) ls[namespaceLabel] = lv(svc.Namespace) + addObjectMetaLabels(ls, svc.ObjectMeta, RoleService) - for k, v := range svc.Labels { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(serviceLabelPrefix+ln)] = lv(v) - ls[model.LabelName(serviceLabelPresentPrefix+ln)] = presentValue - } - - for k, v := range svc.Annotations { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(serviceAnnotationPrefix+ln)] = lv(v) - ls[model.LabelName(serviceAnnotationPresentPrefix+ln)] = presentValue - } return ls } From 98ffad01b80023d923bcab3d151870d53fd3a422 Mon Sep 17 00:00:00 2001 From: "renzheng.wang" Date: Sat, 25 Jun 2022 21:58:44 +0800 Subject: [PATCH 171/632] update tests and docs Signed-off-by: renzheng.wang --- discovery/kubernetes/endpoints_test.go | 79 +++++++++++++--------- discovery/kubernetes/endpointslice_test.go | 28 ++++++++ docs/configuration/configuration.md | 6 ++ 3 files changed, 82 insertions(+), 31 deletions(-) diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go index 5aa58bdc4..346bf6133 100644 --- a/discovery/kubernetes/endpoints_test.go +++ b/discovery/kubernetes/endpoints_test.go @@ -32,6 +32,9 @@ func makeEndpoints() *v1.Endpoints { ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: "default", + Annotations: map[string]string{ + "test.annotation": "test", + }, }, Subsets: []v1.EndpointSubset{ { @@ -134,8 +137,10 @@ func TestEndpointsDiscoveryBeforeRun(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_endpoints_annotation_test_annotation": "test", + "__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true", }, Source: 
"endpoints/default/testendpoints", }, @@ -434,11 +439,13 @@ func TestEndpointsDiscoveryWithService(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_endpoints_name": "testendpoints", - "__meta_kubernetes_service_label_app_name": "test", - "__meta_kubernetes_service_labelpresent_app_name": "true", - "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_service_label_app_name": "test", + "__meta_kubernetes_service_labelpresent_app_name": "true", + "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_endpoints_annotation_test_annotation": "test", + "__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true", }, Source: "endpoints/default/testendpoints", }, @@ -510,13 +517,15 @@ func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_endpoints_name": "testendpoints", - "__meta_kubernetes_service_label_app_name": "svc", - "__meta_kubernetes_service_labelpresent_app_name": "true", - "__meta_kubernetes_service_name": "testendpoints", - "__meta_kubernetes_service_label_component": "testing", - "__meta_kubernetes_service_labelpresent_component": "true", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_service_label_app_name": "svc", + "__meta_kubernetes_service_labelpresent_app_name": "true", + "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_service_label_component": "testing", + "__meta_kubernetes_service_labelpresent_component": "true", + "__meta_kubernetes_endpoints_annotation_test_annotation": "test", + "__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true", }, Source: "endpoints/default/testendpoints", }, @@ -583,11 +592,13 @@ func 
TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_endpoints_name": "testendpoints", - "__meta_kubernetes_service_label_app_name": "test", - "__meta_kubernetes_service_labelpresent_app_name": "true", - "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_service_label_app_name": "test", + "__meta_kubernetes_service_labelpresent_app_name": "true", + "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_endpoints_annotation_test_annotation": "test", + "__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true", }, Source: "endpoints/default/testendpoints", }, @@ -658,11 +669,13 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_endpoints_name": "testendpoints", - "__meta_kubernetes_service_label_app_name": "test", - "__meta_kubernetes_service_labelpresent_app_name": "true", - "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_service_label_app_name": "test", + "__meta_kubernetes_service_labelpresent_app_name": "true", + "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_endpoints_annotation_test_annotation": "test", + "__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true", }, Source: "endpoints/default/testendpoints", }, @@ -777,11 +790,13 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_namespace": "ns1", - "__meta_kubernetes_endpoints_name": "testendpoints", - "__meta_kubernetes_service_label_app": "app1", - "__meta_kubernetes_service_labelpresent_app": "true", - "__meta_kubernetes_service_name": 
"testendpoints", + "__meta_kubernetes_namespace": "ns1", + "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_endpoints_annotation_test_annotation": "test", + "__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true", + "__meta_kubernetes_service_label_app": "app1", + "__meta_kubernetes_service_labelpresent_app": "true", + "__meta_kubernetes_service_name": "testendpoints", }, Source: "endpoints/ns1/testendpoints", }, @@ -901,8 +916,10 @@ func TestEndpointsDiscoveryOwnNamespace(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_namespace": "own-ns", - "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_namespace": "own-ns", + "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_endpoints_annotation_test_annotation": "test", + "__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true", }, Source: "endpoints/own-ns/testendpoints", }, diff --git a/discovery/kubernetes/endpointslice_test.go b/discovery/kubernetes/endpointslice_test.go index 42e64a056..5bc6bd41d 100644 --- a/discovery/kubernetes/endpointslice_test.go +++ b/discovery/kubernetes/endpointslice_test.go @@ -52,6 +52,9 @@ func makeEndpointSliceV1() *v1.EndpointSlice { Labels: map[string]string{ v1.LabelServiceName: "testendpoints", }, + Annotations: map[string]string{ + "test.annotation": "test", + }, }, AddressType: v1.AddressTypeIPv4, Ports: []v1.EndpointPort{ @@ -114,6 +117,9 @@ func makeEndpointSliceV1beta1() *v1beta1.EndpointSlice { Labels: map[string]string{ v1beta1.LabelServiceName: "testendpoints", }, + Annotations: map[string]string{ + "test.annotation": "test", + }, }, AddressType: v1beta1.AddressTypeIPv4, Ports: []v1beta1.EndpointPort{ @@ -224,6 +230,8 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", 
"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", }, Source: "endpointslice/default/testendpoints", }, @@ -287,6 +295,8 @@ func TestEndpointSliceDiscoveryBeforeRunV1beta1(t *testing.T) { "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", }, Source: "endpointslice/default/testendpoints", }, @@ -486,6 +496,8 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) { "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", "__meta_kubernetes_namespace": "default", }, }, @@ -584,6 +596,8 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) { "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", "__meta_kubernetes_namespace": "default", }, }, @@ -671,6 +685,8 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) { "__meta_kubernetes_endpointslice_name": "testendpoints", 
"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", "__meta_kubernetes_namespace": "default", }, Source: "endpointslice/default/testendpoints", @@ -753,6 +769,8 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) { "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", "__meta_kubernetes_namespace": "default", "__meta_kubernetes_service_label_app_name": "test", "__meta_kubernetes_service_labelpresent_app_name": "true", @@ -851,6 +869,8 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) { "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", "__meta_kubernetes_namespace": "default", "__meta_kubernetes_service_label_app_name": "svc", "__meta_kubernetes_service_label_component": "testing", @@ -945,6 +965,8 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + 
"__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", "__meta_kubernetes_namespace": "default", "__meta_kubernetes_service_label_app_name": "test", "__meta_kubernetes_service_labelpresent_app_name": "true", @@ -1043,6 +1065,8 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) { "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", "__meta_kubernetes_namespace": "default", "__meta_kubernetes_service_label_app_name": "test", "__meta_kubernetes_service_labelpresent_app_name": "true", @@ -1181,6 +1205,8 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) { "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", "__meta_kubernetes_namespace": "ns1", "__meta_kubernetes_service_label_app": "app1", "__meta_kubernetes_service_labelpresent_app": "true", @@ -1328,6 +1354,8 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) { "__meta_kubernetes_namespace": "own-ns", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", + "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", + "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", }, Source: 
"endpointslice/own-ns/testendpoints", }, diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index b0b587e02..a528be7b7 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -1970,6 +1970,8 @@ Available meta labels: * `__meta_kubernetes_endpoints_name`: The names of the endpoints object. * `__meta_kubernetes_endpoints_label_`: Each label from the endpoints object. * `__meta_kubernetes_endpoints_labelpresent_`: `true` for each label from the endpoints object. +* `__meta_kubernetes_endpoints_annotation_`: Each annotation from the endpoints object. +* `__meta_kubernetes_endpoints_annotationpresent_`: `true` for each annotation from the endpoints object. * For all targets discovered directly from the endpoints list (those not additionally inferred from underlying pods), the following labels are attached: * `__meta_kubernetes_endpoint_hostname`: Hostname of the endpoint. @@ -1992,6 +1994,10 @@ Available meta labels: * `__meta_kubernetes_namespace`: The namespace of the endpoints object. * `__meta_kubernetes_endpointslice_name`: The name of endpointslice object. +* `__meta_kubernetes_endpointslice_label_`: Each label from the endpointslice object. +* `__meta_kubernetes_endpointslice_labelpresent_`: `true` for each label from the endpointslice object. +* `__meta_kubernetes_endpointslice_annotation_`: Each annotation from the endpointslice object. +* `__meta_kubernetes_endpointslice_annotationpresent_`: `true` for each annotation from the endpointslice object. * For all targets discovered directly from the endpointslice list (those not additionally inferred from underlying pods), the following labels are attached: * `__meta_kubernetes_endpointslice_address_target_kind`: Kind of the referenced object. 
From b2c5de2e65aeb5c3faba24538bf0290f3ed75f57 Mon Sep 17 00:00:00 2001 From: "renzheng.wang" Date: Sat, 25 Jun 2022 22:40:47 +0800 Subject: [PATCH 172/632] fix lint issue Signed-off-by: renzheng.wang --- discovery/kubernetes/endpointslice.go | 1 - discovery/kubernetes/kubernetes.go | 3 ++- discovery/kubernetes/service.go | 19 +++++++------------ 3 files changed, 9 insertions(+), 14 deletions(-) diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index ed23a95b3..a8b492f47 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -252,7 +252,6 @@ func endpointSliceSourceFromNamespaceAndName(namespace, name string) string { } const ( - endpointSliceNameLabel = metaLabelPrefix + "endpointslice_name" endpointSliceAddressTypeLabel = metaLabelPrefix + "endpointslice_address_type" endpointSlicePortNameLabel = metaLabelPrefix + "endpointslice_port_name" endpointSlicePortProtocolLabel = metaLabelPrefix + "endpointslice_port_protocol" diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go index d45ac4193..ca5ee49e2 100644 --- a/discovery/kubernetes/kubernetes.go +++ b/discovery/kubernetes/kubernetes.go @@ -17,13 +17,14 @@ import ( "context" "errors" "fmt" - "github.com/prometheus/prometheus/util/strutil" "os" "reflect" "strings" "sync" "time" + "github.com/prometheus/prometheus/util/strutil" + disv1beta1 "k8s.io/api/discovery/v1beta1" "github.com/go-kit/log" diff --git a/discovery/kubernetes/service.go b/discovery/kubernetes/service.go index 96cac3365..7addf0054 100644 --- a/discovery/kubernetes/service.go +++ b/discovery/kubernetes/service.go @@ -146,18 +146,13 @@ func serviceSourceFromNamespaceAndName(namespace, name string) string { } const ( - serviceNameLabel = metaLabelPrefix + "service_name" - serviceLabelPrefix = metaLabelPrefix + "service_label_" - serviceLabelPresentPrefix = metaLabelPrefix + "service_labelpresent_" - serviceAnnotationPrefix = 
metaLabelPrefix + "service_annotation_" - serviceAnnotationPresentPrefix = metaLabelPrefix + "service_annotationpresent_" - servicePortNameLabel = metaLabelPrefix + "service_port_name" - servicePortNumberLabel = metaLabelPrefix + "service_port_number" - servicePortProtocolLabel = metaLabelPrefix + "service_port_protocol" - serviceClusterIPLabel = metaLabelPrefix + "service_cluster_ip" - serviceLoadBalancerIP = metaLabelPrefix + "service_loadbalancer_ip" - serviceExternalNameLabel = metaLabelPrefix + "service_external_name" - serviceType = metaLabelPrefix + "service_type" + servicePortNameLabel = metaLabelPrefix + "service_port_name" + servicePortNumberLabel = metaLabelPrefix + "service_port_number" + servicePortProtocolLabel = metaLabelPrefix + "service_port_protocol" + serviceClusterIPLabel = metaLabelPrefix + "service_cluster_ip" + serviceLoadBalancerIP = metaLabelPrefix + "service_loadbalancer_ip" + serviceExternalNameLabel = metaLabelPrefix + "service_external_name" + serviceType = metaLabelPrefix + "service_type" ) func serviceLabels(svc *apiv1.Service) model.LabelSet { From dfae954dc1137568f33564e8cffda321f2867925 Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Tue, 30 May 2023 08:42:38 -0700 Subject: [PATCH 173/632] Improving Performance on the API Gzip Handler (#12363) Using github.com/klauspost/compress package to replace the current Gzip Handler on the API. We see significant improvements using this handler over the current one as shown in the benchmark added. Also: * move selection of compression from `newCompressedResponseWriter` to `*CompressionHandler.ServeHTTP`. * renaming `compressedResponseWriter` since it now only does one kind of compression. 
Signed-off-by: Alan Protasio --- go.mod | 1 + go.sum | 1 + util/httputil/compression.go | 74 ++++++++----------- util/httputil/compression_test.go | 115 +++++++++++++++++++++++++++--- 4 files changed, 139 insertions(+), 52 deletions(-) diff --git a/go.mod b/go.mod index 15daecc2c..9b826ad33 100644 --- a/go.mod +++ b/go.mod @@ -32,6 +32,7 @@ require ( github.com/hetznercloud/hcloud-go v1.45.1 github.com/ionos-cloud/sdk-go/v6 v6.1.6 github.com/json-iterator/go v1.1.12 + github.com/klauspost/compress v1.13.6 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b github.com/linode/linodego v1.16.1 github.com/miekg/dns v1.1.53 diff --git a/go.sum b/go.sum index 18d98dfae..3a472c99a 100644 --- a/go.sum +++ b/go.sum @@ -499,6 +499,7 @@ github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0Lh github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= diff --git a/util/httputil/compression.go b/util/httputil/compression.go index b96c088cb..5e9276958 100644 --- a/util/httputil/compression.go +++ b/util/httputil/compression.go @@ -14,11 +14,11 @@ package httputil import ( - "compress/gzip" - "compress/zlib" - "io" "net/http" "strings" + + "github.com/klauspost/compress/gzhttp" + "github.com/klauspost/compress/zlib" ) const ( @@ -28,53 +28,27 @@ const ( deflateEncoding = "deflate" ) -// Wrapper around http.Handler which adds suitable response compression 
based -// on the client's Accept-Encoding headers. -type compressedResponseWriter struct { +// Wrapper around http.ResponseWriter which adds deflate compression +type deflatedResponseWriter struct { http.ResponseWriter - writer io.Writer + writer *zlib.Writer } // Writes HTTP response content data. -func (c *compressedResponseWriter) Write(p []byte) (int, error) { +func (c *deflatedResponseWriter) Write(p []byte) (int, error) { return c.writer.Write(p) } -// Closes the compressedResponseWriter and ensures to flush all data before. -func (c *compressedResponseWriter) Close() { - if zlibWriter, ok := c.writer.(*zlib.Writer); ok { - zlibWriter.Flush() - } - if gzipWriter, ok := c.writer.(*gzip.Writer); ok { - gzipWriter.Flush() - } - if closer, ok := c.writer.(io.Closer); ok { - defer closer.Close() - } +// Close Closes the deflatedResponseWriter and ensures to flush all data before. +func (c *deflatedResponseWriter) Close() { + c.writer.Close() } -// Constructs a new compressedResponseWriter based on client request headers. -func newCompressedResponseWriter(writer http.ResponseWriter, req *http.Request) *compressedResponseWriter { - encodings := strings.Split(req.Header.Get(acceptEncodingHeader), ",") - for _, encoding := range encodings { - switch strings.TrimSpace(encoding) { - case gzipEncoding: - writer.Header().Set(contentEncodingHeader, gzipEncoding) - return &compressedResponseWriter{ - ResponseWriter: writer, - writer: gzip.NewWriter(writer), - } - case deflateEncoding: - writer.Header().Set(contentEncodingHeader, deflateEncoding) - return &compressedResponseWriter{ - ResponseWriter: writer, - writer: zlib.NewWriter(writer), - } - } - } - return &compressedResponseWriter{ +// Constructs a new deflatedResponseWriter to compress the original writer using 'deflate' compression. 
+func newDeflateResponseWriter(writer http.ResponseWriter) *deflatedResponseWriter { + return &deflatedResponseWriter{ ResponseWriter: writer, - writer: writer, + writer: zlib.NewWriter(writer), } } @@ -86,7 +60,21 @@ type CompressionHandler struct { // ServeHTTP adds compression to the original http.Handler's ServeHTTP() method. func (c CompressionHandler) ServeHTTP(writer http.ResponseWriter, req *http.Request) { - compWriter := newCompressedResponseWriter(writer, req) - c.Handler.ServeHTTP(compWriter, req) - compWriter.Close() + encodings := strings.Split(req.Header.Get(acceptEncodingHeader), ",") + for _, encoding := range encodings { + switch strings.TrimSpace(encoding) { + case gzipEncoding: + gzhttp.GzipHandler(c.Handler).ServeHTTP(writer, req) + return + case deflateEncoding: + compWriter := newDeflateResponseWriter(writer) + writer.Header().Set(contentEncodingHeader, deflateEncoding) + c.Handler.ServeHTTP(compWriter, req) + compWriter.Close() + return + default: + c.Handler.ServeHTTP(writer, req) + return + } + } } diff --git a/util/httputil/compression_test.go b/util/httputil/compression_test.go index 851279761..b7148fc1c 100644 --- a/util/httputil/compression_test.go +++ b/util/httputil/compression_test.go @@ -17,23 +17,30 @@ import ( "bytes" "compress/gzip" "compress/zlib" + "encoding/json" + "fmt" "io" "net/http" "net/http/httptest" + "strings" "testing" "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/labels" ) var ( - mux *http.ServeMux - server *httptest.Server + mux *http.ServeMux + server *httptest.Server + respBody = strings.Repeat("Hello World!", 500) ) func setup() func() { mux = http.NewServeMux() server = httptest.NewServer(mux) return func() { + server.CloseClientConnections() server.Close() } } @@ -41,7 +48,7 @@ func setup() func() { func getCompressionHandlerFunc() CompressionHandler { hf := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte("Hello World!")) + 
w.Write([]byte(respBody)) } return CompressionHandler{ Handler: http.HandlerFunc(hf), @@ -67,9 +74,8 @@ func TestCompressionHandler_PlainText(t *testing.T) { contents, err := io.ReadAll(resp.Body) require.NoError(t, err, "unexpected error while creating the response body reader") - expected := "Hello World!" actual := string(contents) - require.Equal(t, expected, actual, "expected response with content") + require.Equal(t, respBody, actual, "expected response with content") } func TestCompressionHandler_Gzip(t *testing.T) { @@ -103,8 +109,7 @@ func TestCompressionHandler_Gzip(t *testing.T) { require.NoError(t, err, "unexpected error while reading the response body") actual := buf.String() - expected := "Hello World!" - require.Equal(t, expected, actual, "unexpected response content") + require.Equal(t, respBody, actual, "unexpected response content") } func TestCompressionHandler_Deflate(t *testing.T) { @@ -138,6 +143,98 @@ func TestCompressionHandler_Deflate(t *testing.T) { require.NoError(t, err, "unexpected error while reading the response body") actual := buf.String() - expected := "Hello World!" 
- require.Equal(t, expected, actual, "expected response with content") + require.Equal(t, respBody, actual, "expected response with content") +} + +func Benchmark_compression(b *testing.B) { + client := &http.Client{ + Transport: &http.Transport{ + DisableCompression: true, + }, + } + + cases := map[string]struct { + enc string + numberOfLabels int + }{ + "gzip-10-labels": { + enc: gzipEncoding, + numberOfLabels: 10, + }, + "gzip-100-labels": { + enc: gzipEncoding, + numberOfLabels: 100, + }, + "gzip-1K-labels": { + enc: gzipEncoding, + numberOfLabels: 1000, + }, + "gzip-10K-labels": { + enc: gzipEncoding, + numberOfLabels: 10000, + }, + "gzip-100K-labels": { + enc: gzipEncoding, + numberOfLabels: 100000, + }, + "gzip-1M-labels": { + enc: gzipEncoding, + numberOfLabels: 1000000, + }, + } + + for name, tc := range cases { + b.Run(name, func(b *testing.B) { + tearDown := setup() + defer tearDown() + labels := labels.ScratchBuilder{} + + for i := 0; i < tc.numberOfLabels; i++ { + labels.Add(fmt.Sprintf("Name%v", i), fmt.Sprintf("Value%v", i)) + } + + respBody, err := json.Marshal(labels.Labels()) + require.NoError(b, err) + + hf := func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write(respBody) + } + h := CompressionHandler{ + Handler: http.HandlerFunc(hf), + } + + mux.Handle("/foo_endpoint", h) + + req, _ := http.NewRequest("GET", server.URL+"/foo_endpoint", nil) + req.Header.Set(acceptEncodingHeader, tc.enc) + + b.ReportAllocs() + b.ResetTimer() + + // Reusing the array to read the body and avoid allocation on the test + encRespBody := make([]byte, len(respBody)) + + for i := 0; i < b.N; i++ { + resp, err := client.Do(req) + + require.NoError(b, err) + + require.NoError(b, err, "client get failed with unexpected error") + responseBodySize := 0 + for { + n, err := resp.Body.Read(encRespBody) + responseBodySize += n + if err == io.EOF { + break + } + } + + b.ReportMetric(float64(responseBodySize), "ContentLength") + resp.Body.Close() 
+ } + + client.CloseIdleConnections() + }) + } } From ca6580828aeef078370116b86ff3fbe9759b75ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Wed, 31 May 2023 15:17:44 +0200 Subject: [PATCH 174/632] feat: support histogram and summary metric types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- util/fmtutil/format.go | 171 +++++++++++++++++++++++++----------- util/fmtutil/format_test.go | 140 ++++++++++++++++++++++++++++- 2 files changed, 256 insertions(+), 55 deletions(-) diff --git a/util/fmtutil/format.go b/util/fmtutil/format.go index b5bb9469c..291308dc2 100644 --- a/util/fmtutil/format.go +++ b/util/fmtutil/format.go @@ -14,6 +14,7 @@ package fmtutil import ( + "errors" "fmt" "io" "sort" @@ -26,6 +27,12 @@ import ( "github.com/prometheus/prometheus/prompb" ) +const ( + sumStr = "_sum" + countStr = "_count" + bucketStr = "_bucket" +) + var MetricMetadataTypeValue = map[string]int32{ "UNKNOWN": 0, "COUNTER": 1, @@ -60,71 +67,129 @@ func FormatMetrics(mf map[string]*dto.MetricFamily, extraLabels map[string]strin wr.Metadata = append(wr.Metadata, metadata) for _, metric := range mf[metricName].Metric { - var timeserie prompb.TimeSeries - - // build labels map - labels := make(map[string]string, len(metric.Label)+len(extraLabels)) - labels[model.MetricNameLabel] = metricName - - // add extra labels - for key, value := range extraLabels { - labels[key] = value + labels := makeLabelsMap(metric, metricName, extraLabels) + if err := makeTimeseries(wr, labels, metric); err != nil { + return wr, err } - - // add metric labels - for _, label := range metric.Label { - labelname := label.GetName() - if labelname == model.JobLabel { - labelname = fmt.Sprintf("%s%s", model.ExportedLabelPrefix, labelname) - } - labels[labelname] = label.GetValue() - } - - // build labels name list - sortedLabelNames := make([]string, 0, len(labels)) - for label := range labels { - 
sortedLabelNames = append(sortedLabelNames, label) - } - // sort labels name in lexicographical order - sort.Strings(sortedLabelNames) - - for _, label := range sortedLabelNames { - timeserie.Labels = append(timeserie.Labels, prompb.Label{ - Name: label, - Value: labels[label], - }) - } - - timestamp := metric.GetTimestampMs() - if timestamp == 0 { - timestamp = time.Now().UnixNano() / int64(time.Millisecond) - } - - timeserie.Samples = []prompb.Sample{ - { - Timestamp: timestamp, - Value: getMetricsValue(metric), - }, - } - - wr.Timeseries = append(wr.Timeseries, timeserie) } } return wr, nil } -// getMetricsValue return the value of a timeserie without the need to give value type -func getMetricsValue(m *dto.Metric) float64 { +func makeTimeserie(wr *prompb.WriteRequest, labels map[string]string, timestamp int64, value float64) { + var timeserie prompb.TimeSeries + timeserie.Labels = makeLabels(labels) + timeserie.Samples = []prompb.Sample{ + { + Timestamp: timestamp, + Value: value, + }, + } + wr.Timeseries = append(wr.Timeseries, timeserie) +} + +func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Metric) error { + var err error + + timestamp := m.GetTimestampMs() + if timestamp == 0 { + timestamp = time.Now().UnixNano() / int64(time.Millisecond) + } + switch { case m.Gauge != nil: - return m.GetGauge().GetValue() + makeTimeserie(wr, labels, timestamp, m.GetGauge().GetValue()) case m.Counter != nil: - return m.GetCounter().GetValue() + makeTimeserie(wr, labels, timestamp, m.GetCounter().GetValue()) + case m.Summary != nil: + metricName := labels[model.MetricNameLabel] + // Preserve metric name order with first quantile labels timeseries then sum suffix timeserie and finally count suffix timeserie + // Add Summary quantile timeseries + quantileLabels := make(map[string]string, len(labels)+1) + for key, value := range labels { + quantileLabels[key] = value + } + + for _, q := range m.GetSummary().Quantile { + 
quantileLabels[model.QuantileLabel] = fmt.Sprint(q.GetQuantile()) + makeTimeserie(wr, quantileLabels, timestamp, q.GetValue()) + } + // Overwrite label model.MetricNameLabel for count and sum metrics + // Add Summary sum timeserie + labels[model.MetricNameLabel] = metricName + sumStr + makeTimeserie(wr, labels, timestamp, m.GetSummary().GetSampleSum()) + // Add Summary count timeserie + labels[model.MetricNameLabel] = metricName + countStr + makeTimeserie(wr, labels, timestamp, float64(m.GetSummary().GetSampleCount())) + + case m.Histogram != nil: + metricName := labels[model.MetricNameLabel] + // Preserve metric name order with first bucket suffix timeseries then sum suffix timeserie and finally count suffix timeserie + // Add Histogram bucket timeseries + bucketLabels := make(map[string]string, len(labels)+1) + for key, value := range labels { + bucketLabels[key] = value + } + for _, b := range m.GetHistogram().Bucket { + bucketLabels[model.MetricNameLabel] = metricName + bucketStr + bucketLabels[model.BucketLabel] = fmt.Sprint(b.GetUpperBound()) + makeTimeserie(wr, bucketLabels, timestamp, float64(b.GetCumulativeCount())) + } + // Overwrite label model.MetricNameLabel for count and sum metrics + // Add Histogram sum timeserie + labels[model.MetricNameLabel] = metricName + sumStr + makeTimeserie(wr, labels, timestamp, m.GetHistogram().GetSampleSum()) + // Add Histogram count timeserie + labels[model.MetricNameLabel] = metricName + countStr + makeTimeserie(wr, labels, timestamp, float64(m.GetHistogram().GetSampleCount())) + case m.Untyped != nil: - return m.GetUntyped().GetValue() + makeTimeserie(wr, labels, timestamp, m.GetUntyped().GetValue()) default: - return 0. 
+ err = errors.New("unsupported metric type") } + return err +} + +func makeLabels(labelsMap map[string]string) []prompb.Label { + // build labels name list + sortedLabelNames := make([]string, 0, len(labelsMap)) + for label := range labelsMap { + sortedLabelNames = append(sortedLabelNames, label) + } + // sort labels name in lexicographical order + sort.Strings(sortedLabelNames) + + var labels []prompb.Label + for _, label := range sortedLabelNames { + labels = append(labels, prompb.Label{ + Name: label, + Value: labelsMap[label], + }) + } + return labels +} + +func makeLabelsMap(m *dto.Metric, metricName string, extraLabels map[string]string) map[string]string { + // build labels map + labels := make(map[string]string, len(m.Label)+len(extraLabels)) + labels[model.MetricNameLabel] = metricName + + // add extra labels + for key, value := range extraLabels { + labels[key] = value + } + + // add metric labels + for _, label := range m.Label { + labelname := label.GetName() + if labelname == model.JobLabel { + labelname = fmt.Sprintf("%s%s", model.ExportedLabelPrefix, labelname) + } + labels[labelname] = label.GetValue() + } + + return labels } // ParseMetricsTextReader consumes an io.Reader and returns the MetricFamily. 
diff --git a/util/fmtutil/format_test.go b/util/fmtutil/format_test.go index 9deed2de9..e2ac30135 100644 --- a/util/fmtutil/format_test.go +++ b/util/fmtutil/format_test.go @@ -24,13 +24,130 @@ import ( var writeRequestFixture = &prompb.WriteRequest{ Metadata: []prompb.MetricMetadata{ + { + MetricFamilyName: "http_request_duration_seconds", + Type: 3, + Help: "A histogram of the request duration.", + }, + { + MetricFamilyName: "http_requests_total", + Type: 1, + Help: "The total number of HTTP requests.", + }, + { + MetricFamilyName: "rpc_duration_seconds", + Type: 5, + Help: "A summary of the RPC duration in seconds.", + }, { MetricFamilyName: "test_metric1", Type: 2, - Help: "this is a test metric", + Help: "This is a test metric.", }, }, Timeseries: []prompb.TimeSeries{ + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "http_request_duration_seconds_bucket"}, + {Name: "job", Value: "promtool"}, + {Name: "le", Value: "0.1"}, + }, + Samples: []prompb.Sample{{Value: 33444, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "http_request_duration_seconds_bucket"}, + {Name: "job", Value: "promtool"}, + {Name: "le", Value: "0.5"}, + }, + Samples: []prompb.Sample{{Value: 129389, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "http_request_duration_seconds_bucket"}, + {Name: "job", Value: "promtool"}, + {Name: "le", Value: "1"}, + }, + Samples: []prompb.Sample{{Value: 133988, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "http_request_duration_seconds_bucket"}, + {Name: "job", Value: "promtool"}, + {Name: "le", Value: "+Inf"}, + }, + Samples: []prompb.Sample{{Value: 144320, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "http_request_duration_seconds_sum"}, + {Name: "job", Value: "promtool"}, + }, + Samples: []prompb.Sample{{Value: 53423, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: 
"http_request_duration_seconds_count"}, + {Name: "job", Value: "promtool"}, + }, + Samples: []prompb.Sample{{Value: 144320, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "http_requests_total"}, + {Name: "code", Value: "200"}, + {Name: "job", Value: "promtool"}, + {Name: "method", Value: "post"}, + }, + Samples: []prompb.Sample{{Value: 1027, Timestamp: 1395066363000}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "http_requests_total"}, + {Name: "code", Value: "400"}, + {Name: "job", Value: "promtool"}, + {Name: "method", Value: "post"}, + }, + Samples: []prompb.Sample{{Value: 3, Timestamp: 1395066363000}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "rpc_duration_seconds"}, + {Name: "job", Value: "promtool"}, + {Name: "quantile", Value: "0.01"}, + }, + Samples: []prompb.Sample{{Value: 3102, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "rpc_duration_seconds"}, + {Name: "job", Value: "promtool"}, + {Name: "quantile", Value: "0.5"}, + }, + Samples: []prompb.Sample{{Value: 4773, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "rpc_duration_seconds"}, + {Name: "job", Value: "promtool"}, + {Name: "quantile", Value: "0.99"}, + }, + Samples: []prompb.Sample{{Value: 76656, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "rpc_duration_seconds_sum"}, + {Name: "job", Value: "promtool"}, + }, + Samples: []prompb.Sample{{Value: 1.7560473e+07, Timestamp: 1}}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "rpc_duration_seconds_count"}, + {Name: "job", Value: "promtool"}, + }, + Samples: []prompb.Sample{{Value: 2693, Timestamp: 1}}, + }, { Labels: []prompb.Label{ {Name: "__name__", Value: "test_metric1"}, @@ -58,7 +175,26 @@ var writeRequestFixture = &prompb.WriteRequest{ func TestParseMetricsTextAndFormat(t *testing.T) { input := bytes.NewReader([]byte(` - # HELP test_metric1 this is a test metric + # 
HELP http_request_duration_seconds A histogram of the request duration. + # TYPE http_request_duration_seconds histogram + http_request_duration_seconds_bucket{le="0.1"} 33444 1 + http_request_duration_seconds_bucket{le="0.5"} 129389 1 + http_request_duration_seconds_bucket{le="1"} 133988 1 + http_request_duration_seconds_bucket{le="+Inf"} 144320 1 + http_request_duration_seconds_sum 53423 1 + http_request_duration_seconds_count 144320 1 + # HELP http_requests_total The total number of HTTP requests. + # TYPE http_requests_total counter + http_requests_total{method="post",code="200"} 1027 1395066363000 + http_requests_total{method="post",code="400"} 3 1395066363000 + # HELP rpc_duration_seconds A summary of the RPC duration in seconds. + # TYPE rpc_duration_seconds summary + rpc_duration_seconds{quantile="0.01"} 3102 1 + rpc_duration_seconds{quantile="0.5"} 4773 1 + rpc_duration_seconds{quantile="0.99"} 76656 1 + rpc_duration_seconds_sum 1.7560473e+07 1 + rpc_duration_seconds_count 2693 1 + # HELP test_metric1 This is a test metric. 
# TYPE test_metric1 gauge test_metric1{b="c",baz="qux",d="e",foo="bar"} 1 1 test_metric1{b="c",baz="qux",d="e",foo="bar"} 2 1 From 6ae4a46845295e88c353de0dd42caa2b0fe1b46f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Thu, 1 Jun 2023 10:28:55 +0200 Subject: [PATCH 175/632] feat: enhance stdin check and add tests parsing error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- cmd/promtool/main.go | 2 +- cmd/promtool/metrics.go | 90 +++++++++++++++++------------------ docs/command-line/promtool.md | 2 +- util/fmtutil/format_test.go | 27 ++++++++++- 4 files changed, 71 insertions(+), 50 deletions(-) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index c76790e13..bcfcc2422 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -185,7 +185,7 @@ func main() { pushMetricsCmd.Arg("remote-write-url", "Prometheus remote write url to push metrics.").Required().URLVar(&remoteWriteURL) metricFiles := pushMetricsCmd.Arg( "metric-files", - "The metric files to push, default is read from standard input (STDIN).", + "The metric files to push, default is read from standard input.", ).ExistingFiles() pushMetricsLabels := pushMetricsCmd.Flag("label", "Label to attach to metrics. Can be specified multiple times.").Default("job=promtool").StringMap() pushMetricsTimeout := pushMetricsCmd.Flag("timeout", "The time to wait for pushing metrics.").Default("30s").Duration() diff --git a/cmd/promtool/metrics.go b/cmd/promtool/metrics.go index 8abe32cf4..4a6fafd40 100644 --- a/cmd/promtool/metrics.go +++ b/cmd/promtool/metrics.go @@ -33,8 +33,6 @@ import ( // Push metrics to a prometheus remote write (for testing purpose only). 
func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, labels map[string]string, files ...string) int { - failed := false - addressURL, err := url.Parse(url.String()) if err != nil { fmt.Fprintln(os.Stderr, err) @@ -63,63 +61,37 @@ func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[strin headers: headers, } - // add empty string to avoid matching filename + var data []byte + var failed bool + if len(files) == 0 { - files = append(files, "") + data, err = io.ReadAll(os.Stdin) + if err != nil { + fmt.Fprintln(os.Stderr, " FAILED:", err) + return failureExitCode + } + fmt.Printf("Parsing standard input\n") + if parseAndPushMetrics(client, data, labels) { + fmt.Printf(" SUCCESS: metrics pushed to remote write.\n") + return successExitCode + } + return failureExitCode } for _, file := range files { - var data []byte - var err error - - // if file is an empty string it is a stdin - if file == "" { - data, err = io.ReadAll(os.Stdin) - if err != nil { - fmt.Fprintln(os.Stderr, " FAILED:", err) - failed = true - break - } - - fmt.Printf("Parsing input from stdin\n") - } else { - data, err = os.ReadFile(file) - if err != nil { - fmt.Fprintln(os.Stderr, " FAILED:", err) - failed = true - continue - } - - fmt.Printf("Parsing input from metric file %s\n", file) - } - metricsData, err := fmtutil.ParseMetricsTextAndFormat(bytes.NewReader(data), labels) + data, err = os.ReadFile(file) if err != nil { fmt.Fprintln(os.Stderr, " FAILED:", err) failed = true continue } - raw, err := metricsData.Marshal() - if err != nil { - fmt.Fprintln(os.Stderr, " FAILED:", err) - failed = true + fmt.Printf("Parsing metrics file %s\n", file) + if parseAndPushMetrics(client, data, labels) { + fmt.Printf(" SUCCESS: metrics file %s pushed to remote write.\n", file) continue } - - // Encode the request body into snappy encoding. 
- compressed := snappy.Encode(nil, raw) - err = client.Store(context.Background(), compressed) - if err != nil { - fmt.Fprintln(os.Stderr, " FAILED:", err) - failed = true - continue - } - - if file == "" { - fmt.Printf(" SUCCESS: metric pushed to remote write.\n") - } else { - fmt.Printf(" SUCCESS: metric file %s pushed to remote write.\n", file) - } + failed = true } if failed { @@ -129,6 +101,30 @@ func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[strin return successExitCode } +func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]string) bool { + metricsData, err := fmtutil.ParseMetricsTextAndFormat(bytes.NewReader(data), labels) + if err != nil { + fmt.Fprintln(os.Stderr, " FAILED:", err) + return false + } + + raw, err := metricsData.Marshal() + if err != nil { + fmt.Fprintln(os.Stderr, " FAILED:", err) + return false + } + + // Encode the request body into snappy encoding. + compressed := snappy.Encode(nil, raw) + err = client.Store(context.Background(), compressed) + if err != nil { + fmt.Fprintln(os.Stderr, " FAILED:", err) + return false + } + + return true +} + type setHeadersTransport struct { http.RoundTripper headers map[string]string diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index c78900b99..c36caaf61 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -410,7 +410,7 @@ Push metrics to a prometheus remote write (for testing purpose only). | Argument | Description | Required | | --- | --- | --- | | remote-write-url | Prometheus remote write url to push metrics. | Yes | -| metric-files | The metric files to push, default is read from standard input (STDIN). | | +| metric-files | The metric files to push, default is read from standard input. 
| | diff --git a/util/fmtutil/format_test.go b/util/fmtutil/format_test.go index e2ac30135..5c1ab5bde 100644 --- a/util/fmtutil/format_test.go +++ b/util/fmtutil/format_test.go @@ -173,7 +173,7 @@ var writeRequestFixture = &prompb.WriteRequest{ }, } -func TestParseMetricsTextAndFormat(t *testing.T) { +func TestParseAndPushMetricsTextAndFormat(t *testing.T) { input := bytes.NewReader([]byte(` # HELP http_request_duration_seconds A histogram of the request duration. # TYPE http_request_duration_seconds histogram @@ -206,3 +206,28 @@ func TestParseMetricsTextAndFormat(t *testing.T) { require.Equal(t, writeRequestFixture, expected) } + +func TestParseMetricsTextAndFormatErrorParsingFloatValue(t *testing.T) { + input := bytes.NewReader([]byte(` + # HELP http_requests_total The total number of HTTP requests. + # TYPE http_requests_total counter + http_requests_total{method="post",code="200"} 1027Error 1395066363000 + http_requests_total{method="post",code="400"} 3 1395066363000 + `)) + labels := map[string]string{"job": "promtool"} + + _, err := ParseMetricsTextAndFormat(input, labels) + require.Equal(t, err.Error(), "text format parsing error in line 4: expected float as value, got \"1027Error\"") +} + +func TestParseMetricsTextAndFormatErrorParsingMetricType(t *testing.T) { + input := bytes.NewReader([]byte(` + # HELP node_info node info summary. 
+ # TYPE node_info info + node_info{test="summary"} 1 1395066363000 + `)) + labels := map[string]string{"job": "promtool"} + + _, err := ParseMetricsTextAndFormat(input, labels) + require.Equal(t, err.Error(), "text format parsing error in line 3: unknown metric type \"info\"") +} From a8772a41782758e8153c494dcd207e770d8421a4 Mon Sep 17 00:00:00 2001 From: Nidhey Nitin Indurkar <46122307+nidhey27@users.noreply.github.com> Date: Thu, 1 Jun 2023 17:13:09 +0530 Subject: [PATCH 176/632] Feat: Get block by id directly on promtool analyze & get latest block if ID not provided (#12031) * feat: analyze latest block or block by ID in CLI (promtool) Signed-off-by: nidhey27 * address remarks Signed-off-by: nidhey60@gmail.com * address latest review comments Signed-off-by: nidhey60@gmail.com --------- Signed-off-by: nidhey27 Signed-off-by: nidhey60@gmail.com --- cmd/promtool/tsdb.go | 28 +++++++++-------------- tsdb/db.go | 54 ++++++++++++++++++++++++++++++++++++++++++++ tsdb/db_test.go | 19 +++++++++++++++- 3 files changed, 83 insertions(+), 18 deletions(-) diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index 84aa43a9c..b7fad5fe0 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -398,26 +398,20 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error) if err != nil { return nil, nil, err } - blocks, err := db.Blocks() + + if blockID == "" { + blockID, err = db.LastBlockID() + if err != nil { + return nil, nil, err + } + } + + b, err := db.Block(blockID) if err != nil { return nil, nil, err } - var block tsdb.BlockReader - switch { - case blockID != "": - for _, b := range blocks { - if b.Meta().ULID.String() == blockID { - block = b - break - } - } - case len(blocks) > 0: - block = blocks[len(blocks)-1] - } - if block == nil { - return nil, nil, fmt.Errorf("block %s not found", blockID) - } - return db, block, nil + + return db, b, nil } func analyzeBlock(path, blockID string, limit int, runExtended bool) error { diff --git 
a/tsdb/db.go b/tsdb/db.go index 12974150b..32dae57a5 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -607,6 +607,60 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) { return blockReaders, nil } +// LastBlockID returns the BlockID of latest block. +func (db *DBReadOnly) LastBlockID() (string, error) { + entries, err := os.ReadDir(db.dir) + if err != nil { + return "", err + } + + max := uint64(0) + + lastBlockID := "" + + for _, e := range entries { + // Check if dir is a block dir or not. + dirName := e.Name() + ulidObj, err := ulid.ParseStrict(dirName) + if err != nil { + continue // Not a block dir. + } + timestamp := ulidObj.Time() + if timestamp > max { + max = timestamp + lastBlockID = dirName + } + } + + if lastBlockID == "" { + return "", errors.New("no blocks found") + } + + return lastBlockID, nil +} + +// Block returns a block reader by given block id. +func (db *DBReadOnly) Block(blockID string) (BlockReader, error) { + select { + case <-db.closed: + return nil, ErrClosed + default: + } + + _, err := os.Stat(filepath.Join(db.dir, blockID)) + if os.IsNotExist(err) { + return nil, errors.Errorf("invalid block ID %s", blockID) + } + + block, err := OpenBlock(db.logger, filepath.Join(db.dir, blockID), nil) + if err != nil { + return nil, err + } + db.closers = append(db.closers, block) + + return block, nil +} + // Close all block readers. 
func (db *DBReadOnly) Close() error { select { diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 0b980f6ec..427a3b7af 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -2384,6 +2384,7 @@ func TestDBReadOnly(t *testing.T) { dbDir string logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) expBlocks []*Block + expBlock *Block expSeries map[string][]tsdbutil.Sample expChunks map[string][][]tsdbutil.Sample expDBHash []byte @@ -2427,6 +2428,7 @@ func TestDBReadOnly(t *testing.T) { require.NoError(t, app.Commit()) expBlocks = dbWritable.Blocks() + expBlock = expBlocks[0] expDbSize, err := fileutil.DirSize(dbWritable.Dir()) require.NoError(t, err) require.Greater(t, expDbSize, dbSizeBeforeAppend, "db size didn't increase after an append") @@ -2455,7 +2457,22 @@ func TestDBReadOnly(t *testing.T) { require.Equal(t, expBlock.Meta(), blocks[i].Meta(), "block meta mismatch") } }) - + t.Run("block", func(t *testing.T) { + blockID := expBlock.meta.ULID.String() + block, err := dbReadOnly.Block(blockID) + require.NoError(t, err) + require.Equal(t, expBlock.Meta(), block.Meta(), "block meta mismatch") + }) + t.Run("invalid block ID", func(t *testing.T) { + blockID := "01GTDVZZF52NSWB5SXQF0P2PGF" + _, err := dbReadOnly.Block(blockID) + require.Error(t, err) + }) + t.Run("last block ID", func(t *testing.T) { + blockID, err := dbReadOnly.LastBlockID() + require.NoError(t, err) + require.Equal(t, expBlocks[2].Meta().ULID.String(), blockID) + }) t.Run("querier", func(t *testing.T) { // Open a read only db and ensure that the API returns the same result as the normal DB. 
q, err := dbReadOnly.Querier(context.TODO(), math.MinInt64, math.MaxInt64) From f676d4a756f6833ba2694b2c6600259ff3c62533 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Thu, 1 Jun 2023 17:39:04 +0200 Subject: [PATCH 177/632] feat refactoring checkrules func MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- cmd/promtool/main.go | 102 +++++++++++++++++++++++-------------------- 1 file changed, 55 insertions(+), 47 deletions(-) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index c55e5be1e..e29d6f160 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -448,20 +448,12 @@ func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files } fmt.Println() - for _, rf := range ruleFiles { - if n, errs := checkRules(rf, lintSettings); len(errs) > 0 { - fmt.Fprintln(os.Stderr, " FAILED:") - for _, err := range errs { - fmt.Fprintln(os.Stderr, " ", err) - } - failed = true - for _, err := range errs { - hasErrors = hasErrors || !errors.Is(err, lintError) - } - } else { - fmt.Printf(" SUCCESS: %d rules found\n", n) - } - fmt.Println() + rulesFailed, rulesHasErrors := checkRules(ruleFiles, lintSettings) + if rulesFailed { + failed = rulesFailed + } + if rulesHasErrors { + hasErrors = rulesHasErrors } } if failed && hasErrors { @@ -689,14 +681,57 @@ func checkSDFile(filename string) ([]*targetgroup.Group, error) { func CheckRules(ls lintConfig, files ...string) int { failed := false hasErrors := false - - // Add empty string to avoid matching filename. 
if len(files) == 0 { - files = append(files, "") + fmt.Println("Checking standard input") + data, err := io.ReadAll(os.Stdin) + if err != nil { + fmt.Fprintln(os.Stderr, " FAILED:", err) + return failureExitCode + } + rgs, errs := rulefmt.Parse(data) + for _, e := range errs { + fmt.Fprintln(os.Stderr, e.Error()) + return failureExitCode + } + if n, errs := checkRuleGroups(rgs, ls); errs != nil { + fmt.Fprintln(os.Stderr, " FAILED:") + for _, e := range errs { + fmt.Fprintln(os.Stderr, e.Error()) + } + failed = true + for _, err := range errs { + hasErrors = hasErrors || !errors.Is(err, lintError) + } + } else { + fmt.Printf(" SUCCESS: %d rules found\n", n) + } + fmt.Println() + } else { + failed, hasErrors = checkRules(files, ls) } + if failed && hasErrors { + return failureExitCode + } + if failed && ls.fatal { + return lintErrExitCode + } + + return successExitCode +} + +// checkRules validates rule files. +func checkRules(files []string, ls lintConfig) (bool, bool) { + failed := false + hasErrors := false for _, f := range files { - if n, errs := checkRules(f, ls); errs != nil { + fmt.Println("Checking", f) + rgs, errs := rulefmt.ParseFile(f) + if errs != nil { + failed = true + continue + } + if n, errs := checkRuleGroups(rgs, ls); errs != nil { fmt.Fprintln(os.Stderr, " FAILED:") for _, e := range errs { fmt.Fprintln(os.Stderr, e.Error()) @@ -710,37 +745,10 @@ func CheckRules(ls lintConfig, files ...string) int { } fmt.Println() } - if failed && hasErrors { - return failureExitCode - } - if failed && ls.fatal { - return lintErrExitCode - } - return successExitCode + return failed, hasErrors } -func checkRules(filename string, lintSettings lintConfig) (int, []error) { - var rgs *rulefmt.RuleGroups - var errs []error - - // Empty string is stdin input. 
- if filename == "" { - data, err := io.ReadAll(os.Stdin) - if err != nil { - errs = append(errs, err) - return failureExitCode, errs - } - fmt.Println("Checking stdin") - rgs, errs = rulefmt.Parse(data) - } else { - fmt.Println("Checking", filename) - rgs, errs = rulefmt.ParseFile(filename) - } - - if errs != nil { - return successExitCode, errs - } - +func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings lintConfig) (int, []error) { numRules := 0 for _, rg := range rgs.Groups { numRules += len(rg.Rules) From 1f3821379c23ab5855df88ad33f5bb5ecfd68a03 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 1 Jun 2023 17:17:04 +0000 Subject: [PATCH 178/632] promql: refactor: extract fn to wait on concurrency limit Signed-off-by: Bryan Boreham --- promql/engine.go | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 8a64fdf39..8730466f7 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -589,18 +589,11 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storag execSpanTimer, ctx := q.stats.GetSpanTimer(ctx, stats.ExecTotalTime) defer execSpanTimer.Finish() - queueSpanTimer, _ := q.stats.GetSpanTimer(ctx, stats.ExecQueueTime, ng.metrics.queryQueueTime) - // Log query in active log. The active log guarantees that we don't run over - // MaxConcurrent queries. - if ng.activeQueryTracker != nil { - queryIndex, err := ng.activeQueryTracker.Insert(ctx, q.q) - if err != nil { - queueSpanTimer.Finish() - return nil, nil, contextErr(err, "query queue") - } - defer ng.activeQueryTracker.Delete(queryIndex) + if finish, err := ng.queueActive(ctx, q); err != nil { + return nil, nil, err + } else { + defer finish() } - queueSpanTimer.Finish() // Cancel when execution is done or an error was raised. 
defer q.cancel() @@ -623,6 +616,18 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storag panic(fmt.Errorf("promql.Engine.exec: unhandled statement of type %T", q.Statement())) } +// Log query in active log. The active log guarantees that we don't run over +// MaxConcurrent queries. +func (ng *Engine) queueActive(ctx context.Context, q *query) (func(), error) { + if ng.activeQueryTracker == nil { + return func() {}, nil + } + queueSpanTimer, _ := q.stats.GetSpanTimer(ctx, stats.ExecQueueTime, ng.metrics.queryQueueTime) + queryIndex, err := ng.activeQueryTracker.Insert(ctx, q.q) + queueSpanTimer.Finish() + return func() { ng.activeQueryTracker.Delete(queryIndex) }, err +} + func timeMilliseconds(t time.Time) int64 { return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) } From 71fc4f1516b8264606daca7bf7c0e011b7f91f2e Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 1 Jun 2023 17:54:17 +0000 Subject: [PATCH 179/632] promql: refactor: create query object before parsing Signed-off-by: Bryan Boreham --- promql/engine.go | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 8730466f7..81fb3c916 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -409,15 +409,15 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) { // NewInstantQuery returns an evaluation query for the given expression at the given time. 
func (ng *Engine) NewInstantQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) { + pExpr, qry := ng.newQuery(q, qs, opts, ts, ts, 0) expr, err := parser.ParseExpr(qs) if err != nil { return nil, err } - qry, err := ng.newQuery(q, opts, expr, ts, ts, 0) - if err != nil { + if err := ng.validateOpts(expr); err != nil { return nil, err } - qry.q = qs + *pExpr = PreprocessExpr(expr, ts, ts) return qry, nil } @@ -425,27 +425,23 @@ func (ng *Engine) NewInstantQuery(_ context.Context, q storage.Queryable, opts * // NewRangeQuery returns an evaluation query for the given time range and with // the resolution set by the interval. func (ng *Engine) NewRangeQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { + pExpr, qry := ng.newQuery(q, qs, opts, start, end, interval) expr, err := parser.ParseExpr(qs) if err != nil { return nil, err } + if err := ng.validateOpts(expr); err != nil { + return nil, err + } if expr.Type() != parser.ValueTypeVector && expr.Type() != parser.ValueTypeScalar { return nil, fmt.Errorf("invalid expression type %q for range query, must be Scalar or instant Vector", parser.DocumentedType(expr.Type())) } - qry, err := ng.newQuery(q, opts, expr, start, end, interval) - if err != nil { - return nil, err - } - qry.q = qs + *pExpr = PreprocessExpr(expr, start, end) return qry, nil } -func (ng *Engine) newQuery(q storage.Queryable, opts *QueryOpts, expr parser.Expr, start, end time.Time, interval time.Duration) (*query, error) { - if err := ng.validateOpts(expr); err != nil { - return nil, err - } - +func (ng *Engine) newQuery(q storage.Queryable, qs string, opts *QueryOpts, start, end time.Time, interval time.Duration) (*parser.Expr, *query) { // Default to empty QueryOpts if not provided. 
if opts == nil { opts = &QueryOpts{} @@ -457,20 +453,20 @@ func (ng *Engine) newQuery(q storage.Queryable, opts *QueryOpts, expr parser.Exp } es := &parser.EvalStmt{ - Expr: PreprocessExpr(expr, start, end), Start: start, End: end, Interval: interval, LookbackDelta: lookbackDelta, } qry := &query{ + q: qs, stmt: es, ng: ng, stats: stats.NewQueryTimers(), sampleStats: stats.NewQuerySamples(ng.enablePerStepStats && opts.EnablePerStepStats), queryable: q, } - return qry, nil + return &es.Expr, qry } var ( From bb0d8320ddc3b8f8d4888524a7f631f904a5ec6f Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 1 Jun 2023 18:16:05 +0000 Subject: [PATCH 180/632] promql: include parsing in active-query tracking So that the max-concurrency limit is applied. Signed-off-by: Bryan Boreham --- promql/engine.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 81fb3c916..085a7d23c 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -408,8 +408,13 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) { } // NewInstantQuery returns an evaluation query for the given expression at the given time. -func (ng *Engine) NewInstantQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) { +func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) { pExpr, qry := ng.newQuery(q, qs, opts, ts, ts, 0) + if finish, err := ng.queueActive(ctx, qry); err != nil { + return nil, err + } else { + defer finish() + } expr, err := parser.ParseExpr(qs) if err != nil { return nil, err @@ -424,8 +429,13 @@ func (ng *Engine) NewInstantQuery(_ context.Context, q storage.Queryable, opts * // NewRangeQuery returns an evaluation query for the given time range and with // the resolution set by the interval. 
-func (ng *Engine) NewRangeQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { +func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { pExpr, qry := ng.newQuery(q, qs, opts, start, end, interval) + if finish, err := ng.queueActive(ctx, qry); err != nil { + return nil, err + } else { + defer finish() + } expr, err := parser.ParseExpr(qs) if err != nil { return nil, err From 67d2ef004d84427626fadb6e62b420b80712a7f9 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 1 Jun 2023 18:36:34 +0000 Subject: [PATCH 181/632] Placate lint I think the version using scoping was better, but I'm out of energy to fight the linter. Signed-off-by: Bryan Boreham --- promql/engine.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 085a7d23c..f29db3a64 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -410,11 +410,11 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) { // NewInstantQuery returns an evaluation query for the given expression at the given time. func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) { pExpr, qry := ng.newQuery(q, qs, opts, ts, ts, 0) - if finish, err := ng.queueActive(ctx, qry); err != nil { + finishQueue, err := ng.queueActive(ctx, qry) + if err != nil { return nil, err - } else { - defer finish() } + defer finishQueue() expr, err := parser.ParseExpr(qs) if err != nil { return nil, err @@ -431,11 +431,11 @@ func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts // the resolution set by the interval. 
func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { pExpr, qry := ng.newQuery(q, qs, opts, start, end, interval) - if finish, err := ng.queueActive(ctx, qry); err != nil { + finishQueue, err := ng.queueActive(ctx, qry) + if err != nil { return nil, err - } else { - defer finish() } + defer finishQueue() expr, err := parser.ParseExpr(qs) if err != nil { return nil, err @@ -595,11 +595,11 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storag execSpanTimer, ctx := q.stats.GetSpanTimer(ctx, stats.ExecTotalTime) defer execSpanTimer.Finish() - if finish, err := ng.queueActive(ctx, q); err != nil { + finishQueue, err := ng.queueActive(ctx, q) + if err != nil { return nil, nil, err - } else { - defer finish() } + defer finishQueue() // Cancel when execution is done or an error was raised. defer q.cancel() From b1675e23af84e6b156da0c17b5b12736775896b1 Mon Sep 17 00:00:00 2001 From: rakshith210 <56937390+rakshith210@users.noreply.github.com> Date: Thu, 1 Jun 2023 14:20:10 -0700 Subject: [PATCH 182/632] Add Azure AD package for remote write (#11944) * Add Azure AD package for remote write * Made AzurePublic default and updated configuration.md * Updated config structure and removed getToken at initialization * Changed passing context from request Signed-off-by: Rakshith Padmanabha Signed-off-by: rakshith210 --- config/config.go | 12 +- config/config_test.go | 2 +- docs/configuration/configuration.md | 14 +- go.mod | 9 +- go.sum | 12 + storage/remote/azuread/README.md | 8 + storage/remote/azuread/azuread.go | 247 +++++++++++++++++ storage/remote/azuread/azuread_test.go | 252 ++++++++++++++++++ .../testdata/azuread_bad_clientidmissing.yaml | 1 + .../testdata/azuread_bad_invalidclientid.yaml | 3 + .../remote/azuread/testdata/azuread_good.yaml | 3 + .../testdata/azuread_good_cloudmissing.yaml | 2 + storage/remote/client.go | 9 + 
storage/remote/write.go | 1 + 14 files changed, 568 insertions(+), 7 deletions(-) create mode 100644 storage/remote/azuread/README.md create mode 100644 storage/remote/azuread/azuread.go create mode 100644 storage/remote/azuread/azuread_test.go create mode 100644 storage/remote/azuread/testdata/azuread_bad_clientidmissing.yaml create mode 100644 storage/remote/azuread/testdata/azuread_bad_invalidclientid.yaml create mode 100644 storage/remote/azuread/testdata/azuread_good.yaml create mode 100644 storage/remote/azuread/testdata/azuread_good_cloudmissing.yaml diff --git a/config/config.go b/config/config.go index 9f81bbfd5..d32fcc33c 100644 --- a/config/config.go +++ b/config/config.go @@ -34,6 +34,7 @@ import ( "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/storage/remote/azuread" ) var ( @@ -907,6 +908,7 @@ type RemoteWriteConfig struct { QueueConfig QueueConfig `yaml:"queue_config,omitempty"` MetadataConfig MetadataConfig `yaml:"metadata_config,omitempty"` SigV4Config *sigv4.SigV4Config `yaml:"sigv4,omitempty"` + AzureADConfig *azuread.AzureADConfig `yaml:"azuread,omitempty"` } // SetDirectory joins any relative file paths with dir. 
@@ -943,8 +945,12 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil || c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil - if httpClientConfigAuthEnabled && c.SigV4Config != nil { - return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured") + if httpClientConfigAuthEnabled && (c.SigV4Config != nil || c.AzureADConfig != nil) { + return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured") + } + + if c.SigV4Config != nil && c.AzureADConfig != nil { + return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured") } return nil @@ -965,7 +971,7 @@ func validateHeadersForTracing(headers map[string]string) error { func validateHeaders(headers map[string]string) error { for header := range headers { if strings.ToLower(header) == "authorization" { - return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter") + return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter") } if _, ok := reservedHeaders[strings.ToLower(header)]; ok { return fmt.Errorf("%s is a reserved header. 
It must not be changed", header) diff --git a/config/config_test.go b/config/config_test.go index d243d687c..d3288cc90 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1727,7 +1727,7 @@ var expectedErrors = []struct { }, { filename: "remote_write_authorization_header.bad.yml", - errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter`, + errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter`, }, { filename: "remote_write_url_missing.bad.yml", diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index ff1449e34..3a9ace2b6 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -3466,7 +3466,7 @@ authorization: [ credentials_file: ] # Optionally configures AWS's Signature Verification 4 signing process to -# sign requests. Cannot be set at the same time as basic_auth, authorization, or oauth2. +# sign requests. Cannot be set at the same time as basic_auth, authorization, oauth2, or azuread. # To use the default credentials from the AWS SDK, use `sigv4: {}`. sigv4: # The AWS region. If blank, the region from the default credentials chain @@ -3485,10 +3485,20 @@ sigv4: [ role_arn: ] # Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth, authorization, or sigv4. +# Cannot be used at the same time as basic_auth, authorization, sigv4, or azuread. oauth2: [ ] +# Optional AzureAD configuration. +# Cannot be used at the same time as basic_auth, authorization, oauth2, or sigv4. +azuread: + # The Azure Cloud. Options are 'AzurePublic', 'AzureChina', or 'AzureGovernment'. + [ cloud: | default = AzurePublic ] + + # Azure User-assigned Managed identity. + [ managed_identity: + [ client_id: ] + # Configures the remote write request's TLS settings. 
tls_config: [ ] diff --git a/go.mod b/go.mod index 9b826ad33..71f6a2b8d 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,8 @@ go 1.19 require ( github.com/Azure/azure-sdk-for-go v65.0.0+incompatible + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 github.com/Azure/go-autorest/autorest v0.11.28 github.com/Azure/go-autorest/autorest/adal v0.9.23 github.com/alecthomas/kingpin/v2 v2.3.2 @@ -83,10 +85,15 @@ require ( require ( cloud.google.com/go/compute/metadata v0.2.3 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e // indirect @@ -135,7 +142,7 @@ require ( github.com/google/go-cmp v0.5.9 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.3.0 github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect github.com/googleapis/gax-go/v2 v2.7.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect diff --git a/go.sum b/go.sum index 3a472c99a..859ce81ca 100644 --- a/go.sum +++ b/go.sum @@ -38,6 +38,12 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod 
h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 h1:gVXuXcWd1i4C2Ruxe321aU+IKGaStvGB/S90PUPB/W8= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1/go.mod h1:DffdKW9RFqa5VgmsjUOsS7UE7eiA5iAvYUs63bhKQ0M= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 h1:T8quHYlUGyb/oqtSTwqlCr1ilJHrDv+ZtpSfo+hm1BU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1/go.mod h1:gLa1CL2RNE4s7M3yopJ/p0iq5DdY6Yv5ZUt9MTRZOQM= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 h1:+5VZ72z0Qan5Bog5C+ZkgSqUbeVUd9wgtHOrIKuc5b8= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= @@ -60,6 +66,8 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 h1:oPdPEZFSbl7oSPEAIPMPBMUmiL+mqgzBJwM/9qYcwNg= +github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1/go.mod h1:4qFor3D/HDsvBME35Xy9rwW9DecL+M2sNw1ybjPtwA0= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb 
v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= @@ -515,6 +523,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/linode/linodego v1.16.1 h1:5otq57M4PdHycPERRfSFZ0s1yz1ETVWGjCp3hh7+F9w= @@ -630,6 +640,8 @@ github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAv github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI= +github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= diff --git a/storage/remote/azuread/README.md 
b/storage/remote/azuread/README.md new file mode 100644 index 000000000..b3b4457a6 --- /dev/null +++ b/storage/remote/azuread/README.md @@ -0,0 +1,8 @@ +azuread package +========================================= + +azuread provides an http.RoundTripper that attaches an Azure AD accessToken +to remote write requests. + +This module is considered internal to Prometheus, without any stability +guarantees for external usage. diff --git a/storage/remote/azuread/azuread.go b/storage/remote/azuread/azuread.go new file mode 100644 index 000000000..94b6144da --- /dev/null +++ b/storage/remote/azuread/azuread.go @@ -0,0 +1,247 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azuread + +import ( + "context" + "errors" + "fmt" + "net/http" + "strings" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/google/uuid" +) + +const ( + // Clouds. + AzureChina = "AzureChina" + AzureGovernment = "AzureGovernment" + AzurePublic = "AzurePublic" + + // Audiences. 
+ IngestionChinaAudience = "https://monitor.azure.cn//.default" + IngestionGovernmentAudience = "https://monitor.azure.us//.default" + IngestionPublicAudience = "https://monitor.azure.com//.default" +) + +// ManagedIdentityConfig is used to store managed identity config values +type ManagedIdentityConfig struct { + // ClientID is the clientId of the managed identity that is being used to authenticate. + ClientID string `yaml:"client_id,omitempty"` +} + +// AzureADConfig is used to store the config values. +type AzureADConfig struct { // nolint:revive + // ManagedIdentity is the managed identity that is being used to authenticate. + ManagedIdentity *ManagedIdentityConfig `yaml:"managed_identity,omitempty"` + + // Cloud is the Azure cloud in which the service is running. Example: AzurePublic/AzureGovernment/AzureChina. + Cloud string `yaml:"cloud,omitempty"` +} + +// azureADRoundTripper is used to store the roundtripper and the tokenprovider. +type azureADRoundTripper struct { + next http.RoundTripper + tokenProvider *tokenProvider +} + +// tokenProvider is used to store and retrieve Azure AD accessToken. +type tokenProvider struct { + // token is member used to store the current valid accessToken. + token string + // mu guards access to token. + mu sync.Mutex + // refreshTime is used to store the refresh time of the current valid accessToken. + refreshTime time.Time + // credentialClient is the Azure AD credential client that is being used to retrieve accessToken. + credentialClient azcore.TokenCredential + options *policy.TokenRequestOptions +} + +// Validate validates config values provided. 
+func (c *AzureADConfig) Validate() error { + if c.Cloud == "" { + c.Cloud = AzurePublic + } + + if c.Cloud != AzureChina && c.Cloud != AzureGovernment && c.Cloud != AzurePublic { + return fmt.Errorf("must provide a cloud in the Azure AD config") + } + + if c.ManagedIdentity == nil { + return fmt.Errorf("must provide an Azure Managed Identity in the Azure AD config") + } + + if c.ManagedIdentity.ClientID == "" { + return fmt.Errorf("must provide an Azure Managed Identity client_id in the Azure AD config") + } + + _, err := uuid.Parse(c.ManagedIdentity.ClientID) + if err != nil { + return fmt.Errorf("the provided Azure Managed Identity client_id provided is invalid") + } + return nil +} + +// UnmarshalYAML unmarshal the Azure AD config yaml. +func (c *AzureADConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + type plain AzureADConfig + *c = AzureADConfig{} + if err := unmarshal((*plain)(c)); err != nil { + return err + } + return c.Validate() +} + +// NewAzureADRoundTripper creates round tripper adding Azure AD authorization to calls. +func NewAzureADRoundTripper(cfg *AzureADConfig, next http.RoundTripper) (http.RoundTripper, error) { + if next == nil { + next = http.DefaultTransport + } + + cred, err := newTokenCredential(cfg) + if err != nil { + return nil, err + } + + tokenProvider, err := newTokenProvider(cfg, cred) + if err != nil { + return nil, err + } + + rt := &azureADRoundTripper{ + next: next, + tokenProvider: tokenProvider, + } + return rt, nil +} + +// RoundTrip sets Authorization header for requests. 
+func (rt *azureADRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + accessToken, err := rt.tokenProvider.getAccessToken(req.Context()) + if err != nil { + return nil, err + } + bearerAccessToken := "Bearer " + accessToken + req.Header.Set("Authorization", bearerAccessToken) + + return rt.next.RoundTrip(req) +} + +// newTokenCredential returns a TokenCredential of different kinds like Azure Managed Identity and Azure AD application. +func newTokenCredential(cfg *AzureADConfig) (azcore.TokenCredential, error) { + cred, err := newManagedIdentityTokenCredential(cfg.ManagedIdentity.ClientID) + if err != nil { + return nil, err + } + + return cred, nil +} + +// newManagedIdentityTokenCredential returns new Managed Identity token credential. +func newManagedIdentityTokenCredential(managedIdentityClientID string) (azcore.TokenCredential, error) { + clientID := azidentity.ClientID(managedIdentityClientID) + opts := &azidentity.ManagedIdentityCredentialOptions{ID: clientID} + return azidentity.NewManagedIdentityCredential(opts) +} + +// newTokenProvider helps to fetch accessToken for different types of credential. This also takes care of +// refreshing the accessToken before expiry. This accessToken is attached to the Authorization header while making requests. +func newTokenProvider(cfg *AzureADConfig, cred azcore.TokenCredential) (*tokenProvider, error) { + audience, err := getAudience(cfg.Cloud) + if err != nil { + return nil, err + } + + tokenProvider := &tokenProvider{ + credentialClient: cred, + options: &policy.TokenRequestOptions{Scopes: []string{audience}}, + } + + return tokenProvider, nil +} + +// getAccessToken returns the current valid accessToken. 
+func (tokenProvider *tokenProvider) getAccessToken(ctx context.Context) (string, error) { + tokenProvider.mu.Lock() + defer tokenProvider.mu.Unlock() + if tokenProvider.valid() { + return tokenProvider.token, nil + } + err := tokenProvider.getToken(ctx) + if err != nil { + return "", errors.New("Failed to get access token: " + err.Error()) + } + return tokenProvider.token, nil +} + +// valid checks if the token in the token provider is valid and not expired. +func (tokenProvider *tokenProvider) valid() bool { + if len(tokenProvider.token) == 0 { + return false + } + if tokenProvider.refreshTime.After(time.Now().UTC()) { + return true + } + return false +} + +// getToken retrieves a new accessToken and stores the newly retrieved token in the tokenProvider. +func (tokenProvider *tokenProvider) getToken(ctx context.Context) error { + accessToken, err := tokenProvider.credentialClient.GetToken(ctx, *tokenProvider.options) + if err != nil { + return err + } + if len(accessToken.Token) == 0 { + return errors.New("access token is empty") + } + + tokenProvider.token = accessToken.Token + err = tokenProvider.updateRefreshTime(accessToken) + if err != nil { + return err + } + return nil +} + +// updateRefreshTime handles logic to set refreshTime. The refreshTime is set at half the duration of the actual token expiry. +func (tokenProvider *tokenProvider) updateRefreshTime(accessToken azcore.AccessToken) error { + tokenExpiryTimestamp := accessToken.ExpiresOn.UTC() + deltaExpirytime := time.Now().Add(time.Until(tokenExpiryTimestamp) / 2) + if deltaExpirytime.After(time.Now().UTC()) { + tokenProvider.refreshTime = deltaExpirytime + } else { + return errors.New("access token expiry is less than the current time") + } + return nil +} + +// getAudience returns audiences for different clouds. 
+func getAudience(cloud string) (string, error) { + switch strings.ToLower(cloud) { + case strings.ToLower(AzureChina): + return IngestionChinaAudience, nil + case strings.ToLower(AzureGovernment): + return IngestionGovernmentAudience, nil + case strings.ToLower(AzurePublic): + return IngestionPublicAudience, nil + default: + return "", errors.New("Cloud is not specified or is incorrect: " + cloud) + } +} diff --git a/storage/remote/azuread/azuread_test.go b/storage/remote/azuread/azuread_test.go new file mode 100644 index 000000000..ebbd98958 --- /dev/null +++ b/storage/remote/azuread/azuread_test.go @@ -0,0 +1,252 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package azuread + +import ( + "context" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/google/uuid" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + "gopkg.in/yaml.v2" +) + +const ( + dummyAudience = "dummyAudience" + dummyClientID = "00000000-0000-0000-0000-000000000000" + testTokenString = "testTokenString" +) + +var testTokenExpiry = time.Now().Add(10 * time.Second) + +type AzureAdTestSuite struct { + suite.Suite + mockCredential *mockCredential +} + +type TokenProviderTestSuite struct { + suite.Suite + mockCredential *mockCredential +} + +// mockCredential mocks azidentity TokenCredential interface. +type mockCredential struct { + mock.Mock +} + +func (ad *AzureAdTestSuite) BeforeTest(_, _ string) { + ad.mockCredential = new(mockCredential) +} + +func TestAzureAd(t *testing.T) { + suite.Run(t, new(AzureAdTestSuite)) +} + +func (ad *AzureAdTestSuite) TestAzureAdRoundTripper() { + var gotReq *http.Request + + testToken := &azcore.AccessToken{ + Token: testTokenString, + ExpiresOn: testTokenExpiry, + } + + managedIdentityConfig := &ManagedIdentityConfig{ + ClientID: dummyClientID, + } + + azureAdConfig := &AzureADConfig{ + Cloud: "AzurePublic", + ManagedIdentity: managedIdentityConfig, + } + + ad.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil) + + tokenProvider, err := newTokenProvider(azureAdConfig, ad.mockCredential) + ad.Assert().NoError(err) + + rt := &azureADRoundTripper{ + next: promhttp.RoundTripperFunc(func(req *http.Request) (*http.Response, error) { + gotReq = req + return &http.Response{StatusCode: http.StatusOK}, nil + }), + tokenProvider: tokenProvider, + } + + cli := &http.Client{Transport: rt} + + req, err := http.NewRequest(http.MethodPost, "https://example.com", strings.NewReader("Hello, world!")) 
+ ad.Assert().NoError(err) + + _, err = cli.Do(req) + ad.Assert().NoError(err) + ad.Assert().NotNil(gotReq) + + origReq := gotReq + ad.Assert().NotEmpty(origReq.Header.Get("Authorization")) + ad.Assert().Equal("Bearer "+testTokenString, origReq.Header.Get("Authorization")) +} + +func loadAzureAdConfig(filename string) (*AzureADConfig, error) { + content, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + cfg := AzureADConfig{} + if err = yaml.UnmarshalStrict(content, &cfg); err != nil { + return nil, err + } + return &cfg, nil +} + +func testGoodConfig(t *testing.T, filename string) { + _, err := loadAzureAdConfig(filename) + if err != nil { + t.Fatalf("Unexpected error parsing %s: %s", filename, err) + } +} + +func TestGoodAzureAdConfig(t *testing.T) { + filename := "testdata/azuread_good.yaml" + testGoodConfig(t, filename) +} + +func TestGoodCloudMissingAzureAdConfig(t *testing.T) { + filename := "testdata/azuread_good_cloudmissing.yaml" + testGoodConfig(t, filename) +} + +func TestBadClientIdMissingAzureAdConfig(t *testing.T) { + filename := "testdata/azuread_bad_clientidmissing.yaml" + _, err := loadAzureAdConfig(filename) + if err == nil { + t.Fatalf("Did not receive expected error unmarshaling bad azuread config") + } + if !strings.Contains(err.Error(), "must provide an Azure Managed Identity in the Azure AD config") { + t.Errorf("Received unexpected error from unmarshal of %s: %s", filename, err.Error()) + } +} + +func TestBadInvalidClientIdAzureAdConfig(t *testing.T) { + filename := "testdata/azuread_bad_invalidclientid.yaml" + _, err := loadAzureAdConfig(filename) + if err == nil { + t.Fatalf("Did not receive expected error unmarshaling bad azuread config") + } + if !strings.Contains(err.Error(), "the provided Azure Managed Identity client_id provided is invalid") { + t.Errorf("Received unexpected error from unmarshal of %s: %s", filename, err.Error()) + } +} + +func (m *mockCredential) GetToken(ctx context.Context, options 
policy.TokenRequestOptions) (azcore.AccessToken, error) { + args := m.MethodCalled("GetToken", ctx, options) + if args.Get(0) == nil { + return azcore.AccessToken{}, args.Error(1) + } + + return args.Get(0).(azcore.AccessToken), nil +} + +func (s *TokenProviderTestSuite) BeforeTest(_, _ string) { + s.mockCredential = new(mockCredential) +} + +func TestTokenProvider(t *testing.T) { + suite.Run(t, new(TokenProviderTestSuite)) +} + +func (s *TokenProviderTestSuite) TestNewTokenProvider_NilAudience_Fail() { + managedIdentityConfig := &ManagedIdentityConfig{ + ClientID: dummyClientID, + } + + azureAdConfig := &AzureADConfig{ + Cloud: "PublicAzure", + ManagedIdentity: managedIdentityConfig, + } + + actualTokenProvider, actualErr := newTokenProvider(azureAdConfig, s.mockCredential) + + s.Assert().Nil(actualTokenProvider) + s.Assert().NotNil(actualErr) + s.Assert().Equal("Cloud is not specified or is incorrect: "+azureAdConfig.Cloud, actualErr.Error()) +} + +func (s *TokenProviderTestSuite) TestNewTokenProvider_Success() { + managedIdentityConfig := &ManagedIdentityConfig{ + ClientID: dummyClientID, + } + + azureAdConfig := &AzureADConfig{ + Cloud: "AzurePublic", + ManagedIdentity: managedIdentityConfig, + } + s.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(getToken(), nil) + + actualTokenProvider, actualErr := newTokenProvider(azureAdConfig, s.mockCredential) + + s.Assert().NotNil(actualTokenProvider) + s.Assert().Nil(actualErr) + s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background())) +} + +func (s *TokenProviderTestSuite) TestPeriodicTokenRefresh_Success() { + // setup + managedIdentityConfig := &ManagedIdentityConfig{ + ClientID: dummyClientID, + } + + azureAdConfig := &AzureADConfig{ + Cloud: "AzurePublic", + ManagedIdentity: managedIdentityConfig, + } + testToken := &azcore.AccessToken{ + Token: testTokenString, + ExpiresOn: testTokenExpiry, + } + + s.mockCredential.On("GetToken", mock.Anything, 
mock.Anything).Return(*testToken, nil).Once(). + On("GetToken", mock.Anything, mock.Anything).Return(getToken(), nil) + + actualTokenProvider, actualErr := newTokenProvider(azureAdConfig, s.mockCredential) + + s.Assert().NotNil(actualTokenProvider) + s.Assert().Nil(actualErr) + s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background())) + + // Token set to refresh at half of the expiry time. The test tokens are set to expiry in 10s. + // Hence, the 6 seconds wait to check if the token is refreshed. + time.Sleep(6 * time.Second) + + s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background())) + + s.mockCredential.AssertNumberOfCalls(s.T(), "GetToken", 2) + accessToken, err := actualTokenProvider.getAccessToken(context.Background()) + s.Assert().Nil(err) + s.Assert().NotEqual(accessToken, testTokenString) +} + +func getToken() azcore.AccessToken { + return azcore.AccessToken{ + Token: uuid.New().String(), + ExpiresOn: time.Now().Add(10 * time.Second), + } +} diff --git a/storage/remote/azuread/testdata/azuread_bad_clientidmissing.yaml b/storage/remote/azuread/testdata/azuread_bad_clientidmissing.yaml new file mode 100644 index 000000000..68b119cd4 --- /dev/null +++ b/storage/remote/azuread/testdata/azuread_bad_clientidmissing.yaml @@ -0,0 +1 @@ +cloud: AzurePublic diff --git a/storage/remote/azuread/testdata/azuread_bad_invalidclientid.yaml b/storage/remote/azuread/testdata/azuread_bad_invalidclientid.yaml new file mode 100644 index 000000000..1f72fbb71 --- /dev/null +++ b/storage/remote/azuread/testdata/azuread_bad_invalidclientid.yaml @@ -0,0 +1,3 @@ +cloud: AzurePublic +managed_identity: + client_id: foo-foobar-bar-foo-00000000 diff --git a/storage/remote/azuread/testdata/azuread_good.yaml b/storage/remote/azuread/testdata/azuread_good.yaml new file mode 100644 index 000000000..de39f0a06 --- /dev/null +++ b/storage/remote/azuread/testdata/azuread_good.yaml @@ -0,0 +1,3 @@ +cloud: AzurePublic +managed_identity: + client_id: 
00000000-0000-0000-0000-000000000000 diff --git a/storage/remote/azuread/testdata/azuread_good_cloudmissing.yaml b/storage/remote/azuread/testdata/azuread_good_cloudmissing.yaml new file mode 100644 index 000000000..bef631874 --- /dev/null +++ b/storage/remote/azuread/testdata/azuread_good_cloudmissing.yaml @@ -0,0 +1,2 @@ +managed_identity: + client_id: 00000000-0000-0000-0000-000000000000 diff --git a/storage/remote/client.go b/storage/remote/client.go index 1625c9918..33774203c 100644 --- a/storage/remote/client.go +++ b/storage/remote/client.go @@ -36,6 +36,7 @@ import ( "go.opentelemetry.io/otel/trace" "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/storage/remote/azuread" ) const maxErrMsgLen = 1024 @@ -97,6 +98,7 @@ type ClientConfig struct { Timeout model.Duration HTTPClientConfig config_util.HTTPClientConfig SigV4Config *sigv4.SigV4Config + AzureADConfig *azuread.AzureADConfig Headers map[string]string RetryOnRateLimit bool } @@ -146,6 +148,13 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) { } } + if conf.AzureADConfig != nil { + t, err = azuread.NewAzureADRoundTripper(conf.AzureADConfig, httpClient.Transport) + if err != nil { + return nil, err + } + } + if len(conf.Headers) > 0 { t = newInjectHeadersRoundTripper(conf.Headers, t) } diff --git a/storage/remote/write.go b/storage/remote/write.go index 6a33c0adf..4b0a24901 100644 --- a/storage/remote/write.go +++ b/storage/remote/write.go @@ -158,6 +158,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error { Timeout: rwConf.RemoteTimeout, HTTPClientConfig: rwConf.HTTPClientConfig, SigV4Config: rwConf.SigV4Config, + AzureADConfig: rwConf.AzureADConfig, Headers: rwConf.Headers, RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit, }) From 084f38df12fe7c5693f7b0e7164cdb1570a525b7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Jun 2023 23:57:03 +0000 Subject: [PATCH 
183/632] build(deps): bump bufbuild/buf-setup-action from 1.17.0 to 1.20.0 Bumps [bufbuild/buf-setup-action](https://github.com/bufbuild/buf-setup-action) from 1.17.0 to 1.20.0. - [Release notes](https://github.com/bufbuild/buf-setup-action/releases) - [Commits](https://github.com/bufbuild/buf-setup-action/compare/v1.17.0...v1.20.0) --- updated-dependencies: - dependency-name: bufbuild/buf-setup-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/buf-lint.yml | 2 +- .github/workflows/buf.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index 79430ee56..a72837b79 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.17.0 + - uses: bufbuild/buf-setup-action@v1.20.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@v1 diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index 06e53172e..b63f073fa 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -9,7 +9,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.17.0 + - uses: bufbuild/buf-setup-action@v1.20.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@v1 From 90816aa39ecc7e9017a4de653615c461f1516f08 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 Jun 2023 00:01:29 +0000 Subject: [PATCH 184/632] build(deps): bump github.com/prometheus/prometheus Bumps [github.com/prometheus/prometheus](https://github.com/prometheus/prometheus) from 0.43.0 to 0.44.0. 
- [Release notes](https://github.com/prometheus/prometheus/releases) - [Changelog](https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/prometheus/compare/v0.43.0...v0.44.0) --- updated-dependencies: - dependency-name: github.com/prometheus/prometheus dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 14 ++--- documentation/examples/remote_storage/go.sum | 60 ++++++++++---------- 2 files changed, 37 insertions(+), 37 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 1db3e9a9e..c0d433196 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -10,13 +10,13 @@ require ( github.com/influxdata/influxdb v1.11.0 github.com/prometheus/client_golang v1.15.0 github.com/prometheus/common v0.42.0 - github.com/prometheus/prometheus v0.43.0 + github.com/prometheus/prometheus v0.44.0 github.com/stretchr/testify v1.8.2 ) require ( github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect - github.com/aws/aws-sdk-go v1.44.217 // indirect + github.com/aws/aws-sdk-go v1.44.245 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -44,11 +44,11 @@ require ( go.opentelemetry.io/otel/trace v1.14.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/goleak v1.2.1 // indirect - golang.org/x/exp v0.0.0-20230307190834-24139beb5833 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/oauth2 v0.6.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect + golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/oauth2 v0.7.0 // indirect + golang.org/x/sys v0.7.0 // indirect + 
golang.org/x/text v0.9.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.30.0 // indirect diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index c4350e78b..e0eac05c1 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -1,7 +1,7 @@ github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= -github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc= +github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= @@ -19,8 +19,8 @@ github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAu github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.217 h1:FcWC56MRl+k756aH3qeMQTylSdeJ58WN0iFz3fkyRz0= -github.com/aws/aws-sdk-go v1.44.217/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.245 h1:KtY2s4q31/kn33AdV63R5t77mdxsI7rq3YT7Mgo805M= +github.com/aws/aws-sdk-go v1.44.245/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/beorn7/perks 
v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -35,15 +35,15 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= -github.com/digitalocean/godo v1.97.0 h1:p9w1yCcWMZcxFSLPToNGXA96WfUVLXqoHti6GzVomL4= +github.com/digitalocean/godo v1.98.0 h1:potyC1eD0N9n5/P4/WmJuKgg+OGYZOBWEW+/aKTX6QQ= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/docker v23.0.1+incompatible h1:vjgvJZxprTTE1A37nm+CLNAdwu6xZekyoiVlUZEINcY= +github.com/docker/docker v23.0.4+incompatible h1:Kd3Bh9V/rO+XpTP/BLqM+gx8z7+Yb0AA2Ibj+nNo4ek= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= -github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= +github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= @@ -100,7 +100,7 @@ 
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/gophercloud/gophercloud v1.2.0 h1:1oXyj4g54KBg/kFtCdMM6jtxSzeIyg8wv4z1HoGPp1E= +github.com/gophercloud/gophercloud v1.3.0 h1:RUKyCMiZoQR3VlVR5E3K7PK1AC3/qppsWYo6dtBiqs8= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= @@ -114,13 +114,13 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= -github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b h1:EkuSTU8c/63q4LMayj8ilgg/4I5PXDFVcnqKfs9qcwI= +github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 h1:I5xhKLePXpXgM6pZ4xZNTiurLLS3sGuZrZFFzAbM67A= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= -github.com/hetznercloud/hcloud-go v1.41.0 h1:KJGFRRc68QiVu4PrEP5BmCQVveCP2CM26UGQUKGpIUs= +github.com/hetznercloud/hcloud-go v1.42.0 h1:Es/CDOForQN3nOOP5Vxh1N/YHjpCg386iYEX5zCgi+A= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/influxdata/influxdb v1.11.0 h1:0X+ZsbcOWc6AEi5MHee9BYqXCKmz8IZsljrRYjmV8Qg= github.com/influxdata/influxdb v1.11.0/go.mod h1:V93tJcidY0Zh0LtSONZWnXXGDyt20dtVf+Ddp4EnhaA= -github.com/ionos-cloud/sdk-go/v6 v6.1.4 
h1:BJHhFA8Q1SZC7VOXqKKr2BV2ysQ2/4hlk1e4hZte7GY= +github.com/ionos-cloud/sdk-go/v6 v6.1.6 h1:0n4irdqNska+1s3YMCRhrAqKbibEgQ7SwwhAlHzYT5A= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -146,14 +146,14 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/linode/linodego v1.14.1 h1:uGxQyy0BidoEpLGdvfi4cPgEW+0YUFsEGrLEhcTfjNc= +github.com/linode/linodego v1.16.1 h1:5otq57M4PdHycPERRfSFZ0s1yz1ETVWGjCp3hh7+F9w= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/dns v1.1.51 h1:0+Xg7vObnhrz/4ZCZcZh7zPXlmU0aveS2HDBd0m0qSo= +github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -169,7 +169,7 @@ 
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= -github.com/ovh/go-ovh v1.3.0 h1:mvZaddk4E4kLcXhzb+cxBsMPYp2pHqiQpWYkInsuZPQ= +github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -201,10 +201,10 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= -github.com/prometheus/prometheus v0.43.0 h1:18iCSfrbAHbXvYFvR38U1Pt4uZmU9SmDcCpCrBKUiGg= -github.com/prometheus/prometheus v0.43.0/go.mod h1:2BA14LgBeqlPuzObSEbh+Y+JwLH2GcqDlJKbF2sA6FM= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 h1:yFl3jyaSVLNYXlnNYM5z2pagEk1dYQhfr1p20T1NyKY= +github.com/prometheus/prometheus v0.44.0 h1:sgn8Fdx+uE5tHQn0/622swlk2XnIj6udoZCnbVjHIgc= +github.com/prometheus/prometheus v0.44.0/go.mod h1:aPsmIK3py5XammeTguyqTmuqzX/jeCdyOWWobLHNKQg= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 h1:Y7xOFbD+3jaPw+VN7lkakNJ/pa+ZSQVFp1ONtJaBxns= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod 
h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -245,12 +245,12 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= -golang.org/x/exp v0.0.0-20230307190834-24139beb5833 h1:SChBja7BCQewoTAU7IgvucQKMIXrEpFxNMs0spT3/5s= -golang.org/x/exp v0.0.0-20230307190834-24139beb5833/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -267,12 +267,12 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -299,20 +299,20 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= +golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -320,7 +320,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.8.0 
h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From ac8abdaacda605bdcd358ec4b6d30e634a0a569a Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 5 Jun 2023 17:36:11 +0200 Subject: [PATCH 185/632] Rename remaining jitterSeed -> offsetSeed variables (#12414) I had changed the naming from "jitter" to "offset" in: https://github.com/prometheus/prometheus/commit/cb045c0e4b94bbf3eee174d91b5ef2b8553948d5 ...but I forgot to add this file to the commit to complete the renaming, doing that now. Signed-off-by: Julius Volz --- scrape/target.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scrape/target.go b/scrape/target.go index a655e8541..8b745a9c4 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -154,14 +154,14 @@ func (t *Target) hash() uint64 { } // offset returns the time until the next scrape cycle for the target. -// It includes the global server jitterSeed for scrapes from multiple Prometheus to try to be at different times. -func (t *Target) offset(interval time.Duration, jitterSeed uint64) time.Duration { +// It includes the global server offsetSeed for scrapes from multiple Prometheus to try to be at different times. +func (t *Target) offset(interval time.Duration, offsetSeed uint64) time.Duration { now := time.Now().UnixNano() // Base is a pinned to absolute time, no matter how often offset is called. 
var ( base = int64(interval) - now%int64(interval) - offset = (t.hash() ^ jitterSeed) % uint64(interval) + offset = (t.hash() ^ offsetSeed) % uint64(interval) next = base + int64(offset) ) From bfa466d00f152aa8dd58494c171872d7f95e3d5e Mon Sep 17 00:00:00 2001 From: Jesus Vazquez Date: Wed, 7 Jun 2023 12:29:04 +0200 Subject: [PATCH 186/632] Create release candidate 2.45.0-rc.0 (#12435) Signed-off-by: Jesus Vazquez --- CHANGELOG.md | 24 + VERSION | 2 +- go.mod | 59 +- go.sum | 118 ++-- web/api/v1/api.go | 2 +- web/ui/module/codemirror-promql/package.json | 18 +- web/ui/module/lezer-promql/package.json | 6 +- web/ui/package-lock.json | 592 ++++++++++--------- web/ui/package.json | 12 +- web/ui/react-app/package.json | 44 +- web/web.go | 2 +- 11 files changed, 464 insertions(+), 415 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ff702215..3b4ee3fc1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,29 @@ # Changelog +## 2.45.0-rc.0 / 2023-06-05 + +This release is a LTS (Long-Term Support) release of Prometheus and will +receive security, documentation and bugfix patches for at least 12 months. +Please read more about our LTS release cycle at +. + +* [FEATURE] API: New limit parameter to limit the number of items returned by `/api/v1/status/tsdb` endpoint. #12336 +* [FEATURE] Config: Add limits to global config. #12126 +* [FEATURE] Consul SD: Added support for `path_prefix`. #12372 +* [FEATURE] Native histograms: Add option to scrape both classic and native histograms. #12350 +* [FEATURE] Native histograms: Added support for two more arithmetic operators `avg_over_time` and `sum_over_time`. #12262 +* [FEATURE] Promtool: When providing the block id, only one block will be loaded and analyzed. #12031 +* [FEATURE] Remote-write: New Azure ad configuration to support remote writing directly to Azure Monitor workspace. #11944 +* [FEATURE] TSDB: Samples per chunk are now configurable with flag `storage.tsdb.samples-per-chunk`. 
By default set to its former value 120. #12055 +* [ENHANCEMENT] API: Improving Performance on the API Gzip Handler. #12363 +* [ENHANCEMENT] Native histograms: bucket size can now be limited to avoid scrape fails. #12254 +* [ENHANCEMENT] TSDB: Dropped series are now deleted from the WAL sooner. #12297 +* [BUGFIX] Native histograms: ChunkSeries iterator now checks if a new sample can be appended to the open chunk. #12185 +* [BUGFIX] Native histograms: Fix Histogram Appender `Appendable()` segfault. #12357 +* [BUGFIX] Native histograms: Fix setting reset header to gauge histograms in seriesToChunkEncoder. #12329 +* [BUGFIX] TSDB: Tombstone intervals are not modified after Get() call. #12245 +* [BUGFIX] TSDB: Use path/filepath to set the WAL directory. #12349 + ## 2.44.0 / 2023-05-13 This version is built with Go tag `stringlabels`, to use the smaller data diff --git a/VERSION b/VERSION index 3e197472e..ae2ba732a 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.44.0 +2.45.0-rc.0 diff --git a/go.mod b/go.mod index 71f6a2b8d..dd75cc333 100644 --- a/go.mod +++ b/go.mod @@ -6,18 +6,18 @@ require ( github.com/Azure/azure-sdk-for-go v65.0.0+incompatible github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 - github.com/Azure/go-autorest/autorest v0.11.28 + github.com/Azure/go-autorest/autorest v0.11.29 github.com/Azure/go-autorest/autorest/adal v0.9.23 github.com/alecthomas/kingpin/v2 v2.3.2 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 - github.com/aws/aws-sdk-go v1.44.245 + github.com/aws/aws-sdk-go v1.44.276 github.com/cespare/xxhash/v2 v2.2.0 github.com/dennwc/varint v1.0.0 github.com/digitalocean/godo v1.99.0 - github.com/docker/docker v23.0.4+incompatible + github.com/docker/docker v24.0.2+incompatible github.com/edsrzf/mmap-go v1.1.0 github.com/envoyproxy/go-control-plane v0.11.0 - github.com/envoyproxy/protoc-gen-validate v0.10.1 + github.com/envoyproxy/protoc-gen-validate v1.0.1 
github.com/fsnotify/fsnotify v1.6.0 github.com/go-kit/log v0.2.1 github.com/go-logfmt/logfmt v0.6.0 @@ -25,19 +25,19 @@ require ( github.com/go-zookeeper/zk v1.0.3 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/google/pprof v0.0.0-20230406165453-00490a63f317 - github.com/gophercloud/gophercloud v1.3.0 + github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 + github.com/gophercloud/gophercloud v1.4.0 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/hashicorp/consul/api v1.20.0 - github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 + github.com/hashicorp/consul/api v1.21.0 + github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f github.com/hetznercloud/hcloud-go v1.45.1 - github.com/ionos-cloud/sdk-go/v6 v6.1.6 + github.com/ionos-cloud/sdk-go/v6 v6.1.7 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.13.6 + github.com/klauspost/compress v1.16.5 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b - github.com/linode/linodego v1.16.1 - github.com/miekg/dns v1.1.53 + github.com/linode/linodego v1.17.0 + github.com/miekg/dns v1.1.54 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/oklog/run v1.1.0 github.com/oklog/ulid v1.3.1 @@ -45,14 +45,14 @@ require ( github.com/pkg/errors v0.9.1 github.com/prometheus/alertmanager v0.25.0 github.com/prometheus/client_golang v1.15.1 - github.com/prometheus/client_model v0.3.0 - github.com/prometheus/common v0.42.0 + github.com/prometheus/client_model v0.4.0 + github.com/prometheus/common v0.44.0 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/exporter-toolkit v0.10.0 - github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 + github.com/scaleway/scaleway-sdk-go v1.0.0-beta.17 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 - github.com/stretchr/testify v1.8.3 + 
github.com/stretchr/testify v1.8.4 github.com/vultr/govultr/v2 v2.17.2 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 go.opentelemetry.io/otel v1.16.0 @@ -61,17 +61,17 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0 go.opentelemetry.io/otel/sdk v1.16.0 go.opentelemetry.io/otel/trace v1.16.0 - go.uber.org/atomic v1.10.0 + go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.2 go.uber.org/goleak v1.2.1 golang.org/x/net v0.10.0 - golang.org/x/oauth2 v0.7.0 - golang.org/x/sync v0.1.0 + golang.org/x/oauth2 v0.8.0 + golang.org/x/sync v0.2.0 golang.org/x/sys v0.8.0 golang.org/x/time v0.3.0 - golang.org/x/tools v0.8.0 + golang.org/x/tools v0.9.3 google.golang.org/api v0.114.0 - google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e + google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 google.golang.org/grpc v1.55.0 google.golang.org/protobuf v1.30.0 gopkg.in/yaml.v2 v2.4.0 @@ -80,23 +80,20 @@ require ( k8s.io/apimachinery v0.26.2 k8s.io/client-go v0.26.2 k8s.io/klog v1.0.0 - k8s.io/klog/v2 v2.90.1 + k8s.io/klog/v2 v2.100.1 ) require ( cloud.google.com/go/compute/metadata v0.2.3 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect - github.com/rogpeppe/go-internal v1.10.0 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/stretchr/objx v0.5.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect - google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e // indirect - 
google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e // indirect ) require ( @@ -112,7 +109,7 @@ require ( github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 // indirect + github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/docker/distribution v2.8.1+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect @@ -146,7 +143,7 @@ require ( github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect github.com/googleapis/gax-go/v2 v2.7.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0 // indirect github.com/hashicorp/cronexpr v1.1.1 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-hclog v1.4.0 // indirect @@ -182,7 +179,7 @@ require ( go.opentelemetry.io/otel/metric v1.16.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect golang.org/x/crypto v0.8.0 // indirect - golang.org/x/exp v0.0.0-20230321023759-10a507213a29 + golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 golang.org/x/mod v0.10.0 // indirect golang.org/x/term v0.8.0 // indirect golang.org/x/text v0.9.0 // indirect @@ -190,7 +187,7 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gotest.tools/v3 v3.0.3 // indirect - k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d // indirect + k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect k8s.io/utils v0.0.0-20230308161112-d77c459e9343 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect diff --git a/go.sum b/go.sum index 859ce81ca..774deb85b 100644 --- 
a/go.sum +++ b/go.sum @@ -42,15 +42,15 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 h1:gVXuXcWd1i4C2Ruxe321aU+IK github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1/go.mod h1:DffdKW9RFqa5VgmsjUOsS7UE7eiA5iAvYUs63bhKQ0M= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 h1:T8quHYlUGyb/oqtSTwqlCr1ilJHrDv+ZtpSfo+hm1BU= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1/go.mod h1:gLa1CL2RNE4s7M3yopJ/p0iq5DdY6Yv5ZUt9MTRZOQM= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 h1:+5VZ72z0Qan5Bog5C+ZkgSqUbeVUd9wgtHOrIKuc5b8= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= -github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= -github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= +github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= +github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= github.com/Azure/go-autorest/autorest/adal v0.9.23 
h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= @@ -106,8 +106,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.245 h1:KtY2s4q31/kn33AdV63R5t77mdxsI7rq3YT7Mgo805M= -github.com/aws/aws-sdk-go v1.44.245/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.276 h1:ywPlx9C5Yc482dUgAZ9bHpQ6onVvJvYE9FJWsNDCEy0= +github.com/aws/aws-sdk-go v1.44.276/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -138,8 +138,8 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLWvQsR9CzAKt2e+EWV2yX9oXQ4= -github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74 
h1:zlUubfBUxApscKFsF4VSvvfhsBNTBu0eF/ddvpo96yk= +github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -162,8 +162,8 @@ github.com/digitalocean/godo v1.99.0/go.mod h1:SsS2oXo2rznfM/nORlZ/6JaUJZFhmKTib github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v23.0.4+incompatible h1:Kd3Bh9V/rO+XpTP/BLqM+gx8z7+Yb0AA2Ibj+nNo4ek= -github.com/docker/docker v23.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v24.0.2+incompatible h1:eATx+oLz9WdNVkQrr0qjQ8HvRJ4bOOxfzEo8R+dA3cg= +github.com/docker/docker v24.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -188,8 +188,8 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. 
github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= -github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v1.0.1 h1:kt9FtLiooDc0vbwTLhdg3dyNX1K9Qwa1EK9LcD4jVUQ= +github.com/envoyproxy/protoc-gen-validate v1.0.1/go.mod h1:0vj8bNkYbSTNS2PIyH87KZaeN4x9zpL9Qt8fQC7d+vs= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -301,7 +301,6 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -376,8 +375,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof 
v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20230406165453-00490a63f317 h1:hFhpt7CTmR3DX+b4R19ydQFtofxT0Sv3QsKNMVQYTMQ= -github.com/google/pprof v0.0.0-20230406165453-00490a63f317/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= +github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 h1:hR7/MlvK23p6+lIw9SN1TigNLn9ZnF3W4SYRKq2gAHs= +github.com/google/pprof v0.0.0-20230602150820-91b7bce49751/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -390,8 +389,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= -github.com/gophercloud/gophercloud v1.3.0 h1:RUKyCMiZoQR3VlVR5E3K7PK1AC3/qppsWYo6dtBiqs8= -github.com/gophercloud/gophercloud v1.3.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/gophercloud v1.4.0 h1:RqEu43vaX0lb0LanZr5BylK5ICVxjpFFoc0sxivyuHU= +github.com/gophercloud/gophercloud v1.4.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -407,11 +406,11 @@ 
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 h1:gDLXvp5S9izjldquuoAhDzccbskOL6tDC5jMSyx3zxE= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2/go.mod h1:7pdNwVWBBHGiCxa9lAszqCJMbfTISJ7oMftp8+UGV08= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0 h1:1JYBfzqrWPcCclBwxFCPAou9n+q86mfnu7NAeHfte7A= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0/go.mod h1:YDZoGHuwE+ov0c8smSH49WLF3F2LaWnYYuDVd+EWrc0= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.20.0 h1:9IHTjNVSZ7MIwjlW3N3a7iGiykCMDpxZu8jsxFJh0yc= -github.com/hashicorp/consul/api v1.20.0/go.mod h1:nR64eD44KQ59Of/ECwt2vUmIK2DKsDzAwTmwmLl8Wpo= +github.com/hashicorp/consul/api v1.21.0 h1:WMR2JiyuaQWRAMFaOGiYfY4Q4HRpyYRe/oYQofjyduM= +github.com/hashicorp/consul/api v1.21.0/go.mod h1:f8zVJwBcLdr1IQnfdfszjUM0xzp31Zl3bpws3pL9uFM= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.13.1 h1:EygWVWWMczTzXGpO93awkHFzfUka6hLYJ0qhETd+6lY= github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= @@ -460,8 +459,8 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= -github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 
h1:I5xhKLePXpXgM6pZ4xZNTiurLLS3sGuZrZFFzAbM67A= -github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197/go.mod h1:2TCrNvonL09r7EiQ6M2rNt+Cmjbn1QbzchFoTWJFpj4= +github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f h1:yxjcAZRuYymIDC0W4IQHgTe9EQdu2BsjPlVmKwyVZT4= +github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f/go.mod h1:Xjd3OXUTfsWbCCBsQd3EdfPTz5evDi+fxqdvpN+WqQg= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= @@ -474,8 +473,8 @@ github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/ionos-cloud/sdk-go/v6 v6.1.6 h1:0n4irdqNska+1s3YMCRhrAqKbibEgQ7SwwhAlHzYT5A= -github.com/ionos-cloud/sdk-go/v6 v6.1.6/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/ionos-cloud/sdk-go/v6 v6.1.7 h1:uVG1Q/ZDJ7YmCI9Oevpue9xJEH5UrUMyXv8gm7NTxIw= +github.com/ionos-cloud/sdk-go/v6 v6.1.7/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -507,8 +506,9 @@ github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0Lh github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= +github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -527,8 +527,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.16.1 h1:5otq57M4PdHycPERRfSFZ0s1yz1ETVWGjCp3hh7+F9w= -github.com/linode/linodego v1.16.1/go.mod h1:aESRAbpLY9R6IA1WGAWHikRI9DU9Lhesapv1MhKmPHM= +github.com/linode/linodego v1.17.0 h1:aWS98f0jUoY2lhsEuBxRdVkqyGM0nazPd68AEDF0EvU= +github.com/linode/linodego v1.17.0/go.mod h1:/omzPxie0/YI6S0sTw1q47qDt5IYSlbO/infRR4UG+A= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -561,8 +561,8 @@ 
github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw= -github.com/miekg/dns v1.1.53/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= +github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI= +github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -640,8 +640,8 @@ github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAv github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 
h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -669,8 +669,8 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1: github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= @@ -678,8 +678,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod 
h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= @@ -701,15 +701,14 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 h1:Y7xOFbD+3jaPw+VN7lkakNJ/pa+ZSQVFp1ONtJaBxns= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.17 h1:1WuWJu7/e8SqK+uQl7lfk/N/oMZTL2NE/TJsNKRNMc4= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.17/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shoenig/test v0.6.3 h1:GVXWJFk9PiOjN0KoJ7VrJGH6uLPnqxR7/fe3HUPfE0c= +github.com/shoenig/test v0.6.6 h1:Oe8TPH9wAbv++YPNDKJWUnI8Q4PPWCx3UbOfH+FxiMU= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= 
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -752,8 +751,8 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -818,8 +817,8 @@ go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJP go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME= go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/goleak v1.2.1 
h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= @@ -857,8 +856,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -938,8 +937,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= -golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= +golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -953,8 +952,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1010,6 +1009,7 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1100,8 +1100,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM= +golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1163,12 +1163,8 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e h1:Ao9GzfUMPH3zjVfzXG5rlWlk+Q8MXWKwWpwVQE1MXfw= -google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= 
-google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e h1:AZX1ra8YbFMSb7+1pI8S9v4rrgRR7jU1FmuFSSjTVcQ= -google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e h1:NumxXLPfHSndr3wBBdeKiVHjGVFzi9RX2HwwQke94iY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 h1:khxVcsk/FhnzxMKOyD+TDGwjbEOpcPuIpmafPGFmhMA= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1259,8 +1255,8 @@ k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ= k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI= k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU= -k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d h1:VcFq5n7wCJB2FQMCIHfC+f+jNcGgNMar1uKd6rVlifU= -k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= k8s.io/utils v0.0.0-20230308161112-d77c459e9343 h1:m7tbIjXGcGIAtpmQr7/NAi7RsWoW3E7Zcm4jI1HicTc= k8s.io/utils v0.0.0-20230308161112-d77c459e9343/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp 
v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/web/api/v1/api.go b/web/api/v1/api.go index f7249efb0..e9b0081b3 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1491,7 +1491,7 @@ func (api *API) serveTSDBStatus(r *http.Request) apiFuncResult { chunkCount := int64(math.NaN()) for _, mF := range metrics { if *mF.Name == "prometheus_tsdb_head_chunks" { - m := *mF.Metric[0] + m := mF.Metric[0] if m.Gauge != nil { chunkCount = int64(m.Gauge.GetValue()) break diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index e819319ee..e9c952c9c 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -33,17 +33,17 @@ "lru-cache": "^6.0.0" }, "devDependencies": { - "@codemirror/autocomplete": "^6.4.0", - "@codemirror/language": "^6.4.0", - "@codemirror/lint": "^6.1.0", - "@codemirror/state": "^6.2.0", - "@codemirror/view": "^6.7.3", - "@lezer/common": "^1.0.2", - "@lezer/lr": "^1.3.1", - "@lezer/highlight": "^1.1.3", + "@codemirror/autocomplete": "^6.7.1", + "@codemirror/language": "^6.7.0", + "@codemirror/lint": "^6.2.2", + "@codemirror/state": "^6.2.1", + "@codemirror/view": "^6.13.0", + "@lezer/common": "^1.0.3", + "@lezer/lr": "^1.3.6", + "@lezer/highlight": "^1.1.6", "@types/lru-cache": "^5.1.1", "isomorphic-fetch": "^3.0.0", - "nock": "^13.3.0" + "nock": "^13.3.1" }, "peerDependencies": { "@codemirror/autocomplete": "^6.4.0", diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 7daa56738..08249b418 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -30,9 +30,9 @@ "test": "NODE_OPTIONS=--experimental-vm-modules jest" }, "devDependencies": { - "@lezer/generator": "^1.2.2", - "@lezer/lr": "^1.3.1", - "@lezer/highlight": "^1.1.3" + "@lezer/generator": "^1.2.3", + "@lezer/lr": "^1.3.6", + "@lezer/highlight": "^1.1.6" }, 
"peerDependencies": { "@lezer/lr": "^1.2.3", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 1d3d1d3e3..a672a1767 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -10,17 +10,17 @@ "module/*" ], "devDependencies": { - "@types/jest": "^29.4.0", + "@types/jest": "^29.5.2", "@types/node": "^17.0.45", - "eslint-config-prettier": "^8.6.0", + "eslint-config-prettier": "^8.8.0", "eslint-config-react-app": "^7.0.1", "eslint-plugin-prettier": "^4.2.1", - "jest-canvas-mock": "^2.4.0", + "jest-canvas-mock": "^2.5.1", "jest-fetch-mock": "^3.0.3", - "prettier": "^2.8.3", + "prettier": "^2.8.8", "react-scripts": "^5.0.1", - "ts-jest": "^29.0.5", - "typescript": "^4.9.4" + "ts-jest": "^29.1.0", + "typescript": "^4.9.5" }, "engines": { "npm": ">=7.0.0" @@ -35,17 +35,17 @@ "lru-cache": "^6.0.0" }, "devDependencies": { - "@codemirror/autocomplete": "^6.4.0", - "@codemirror/language": "^6.4.0", - "@codemirror/lint": "^6.1.0", - "@codemirror/state": "^6.2.0", - "@codemirror/view": "^6.7.3", - "@lezer/common": "^1.0.2", - "@lezer/highlight": "^1.1.3", - "@lezer/lr": "^1.3.1", + "@codemirror/autocomplete": "^6.7.1", + "@codemirror/language": "^6.7.0", + "@codemirror/lint": "^6.2.2", + "@codemirror/state": "^6.2.1", + "@codemirror/view": "^6.13.0", + "@lezer/common": "^1.0.3", + "@lezer/highlight": "^1.1.6", + "@lezer/lr": "^1.3.6", "@types/lru-cache": "^5.1.1", "isomorphic-fetch": "^3.0.0", - "nock": "^13.3.0" + "nock": "^13.3.1" }, "engines": { "node": ">=12.0.0" @@ -64,9 +64,9 @@ "version": "0.44.0", "license": "Apache-2.0", "devDependencies": { - "@lezer/generator": "^1.2.2", - "@lezer/highlight": "^1.1.3", - "@lezer/lr": "^1.3.1" + "@lezer/generator": "^1.2.3", + "@lezer/highlight": "^1.1.6", + "@lezer/lr": "^1.3.6" }, "peerDependencies": { "@lezer/highlight": "^1.1.2", @@ -2080,9 +2080,9 @@ "dev": true }, "node_modules/@codemirror/autocomplete": { - "version": "6.4.0", - "resolved": 
"https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.4.0.tgz", - "integrity": "sha512-HLF2PnZAm1s4kGs30EiqKMgD7XsYaQ0XJnMR0rofEWQ5t5D60SfqpDIkIh1ze5tiEbyUWm8+VJ6W1/erVvBMIA==", + "version": "6.7.1", + "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.7.1.tgz", + "integrity": "sha512-hSxf9S0uB+GV+gBsjY1FZNo53e1FFdzPceRfCfD1gWOnV6o21GfB5J5Wg9G/4h76XZMPrF0A6OCK/Rz5+V1egg==", "dependencies": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.0.0", @@ -2097,9 +2097,9 @@ } }, "node_modules/@codemirror/commands": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.2.0.tgz", - "integrity": "sha512-+00smmZBradoGFEkRjliN7BjqPh/Hx0KCHWOEibUmflUqZz2RwBTU0MrVovEEHozhx3AUSGcO/rl3/5f9e9Biw==", + "version": "6.2.4", + "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.2.4.tgz", + "integrity": "sha512-42lmDqVH0ttfilLShReLXsDfASKLXzfyC36bzwcqzox9PlHulMcsUOfHXNo2X2aFMVNUoQ7j+d4q5bnfseYoOA==", "dependencies": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.2.0", @@ -2108,9 +2108,9 @@ } }, "node_modules/@codemirror/language": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.4.0.tgz", - "integrity": "sha512-Wzb7GnNj8vnEtbPWiOy9H0m1fBtE28kepQNGLXekU2EEZv43BF865VKITUn+NoV8OpW6gRtvm29YEhqm46927Q==", + "version": "6.7.0", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.7.0.tgz", + "integrity": "sha512-4SMwe6Fwn57klCUsVN0y4/h/iWT+XIXFEmop2lIHHuWO0ubjCrF3suqSZLyOQlznxkNnNbOOfKe5HQbQGCAmTg==", "dependencies": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -2121,9 +2121,9 @@ } }, "node_modules/@codemirror/lint": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.1.0.tgz", - "integrity": "sha512-mdvDQrjRmYPvQ3WrzF6Ewaao+NWERYtpthJvoQ3tK3t/44Ynhk8ZGjTSL9jMEv8CgSMogmt75X8ceOZRDSXHtQ==", + "version": "6.2.2", + 
"resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.2.2.tgz", + "integrity": "sha512-kHGuynBHjqinp1Bx25D2hgH8a6Fh1m9rSmZFzBVTqPIXDIcZ6j3VI67DY8USGYpGrjrJys9R52eLxtfERGNozg==", "dependencies": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -2131,9 +2131,9 @@ } }, "node_modules/@codemirror/search": { - "version": "6.2.3", - "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.2.3.tgz", - "integrity": "sha512-V9n9233lopQhB1dyjsBK2Wc1i+8hcCqxl1wQ46c5HWWLePoe4FluV3TGHoZ04rBRlGjNyz9DTmpJErig8UE4jw==", + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.0.tgz", + "integrity": "sha512-64/M40YeJPToKvGO6p3fijo2vwUEj4nACEAXElCaYQ50HrXSvRaK+NHEhSh73WFBGdvIdhrV+lL9PdJy2RfCYA==", "dependencies": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -2141,14 +2141,14 @@ } }, "node_modules/@codemirror/state": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.2.0.tgz", - "integrity": "sha512-69QXtcrsc3RYtOtd+GsvczJ319udtBf1PTrr2KbLWM/e2CXUPnh0Nz9AUo8WfhSQ7GeL8dPVNUmhQVgpmuaNGA==" + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.2.1.tgz", + "integrity": "sha512-RupHSZ8+OjNT38zU9fKH2sv+Dnlr8Eb8sl4NOnnqz95mCFTZUaiRP8Xv5MeeaG0px2b8Bnfe7YGwCV3nsBhbuw==" }, "node_modules/@codemirror/view": { - "version": "6.7.3", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.7.3.tgz", - "integrity": "sha512-Lt+4POnhXrZFfHOdPzXEHxrzwdy7cjqYlMkOWvoFGi6/bAsjzlFfr0NY3B15B/PGx+cDFgM1hlc12wvYeZbGLw==", + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.13.0.tgz", + "integrity": "sha512-oXTfJzHJ5Tl7f6T8ZO0HKf981zubxgKohjddLobbntbNZHlOZGMRL+pPZGtclDWFaFJWtGBYRGyNdjQ6Xsx5yA==", "dependencies": { "@codemirror/state": "^6.1.4", "style-mod": "^4.0.0", @@ -2476,42 +2476,33 @@ } }, "node_modules/@fortawesome/fontawesome-common-types": { - "version": "6.2.1", - 
"resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.2.1.tgz", - "integrity": "sha512-Sz07mnQrTekFWLz5BMjOzHl/+NooTdW8F8kDQxjWwbpOJcnoSg4vUDng8d/WR1wOxM0O+CY9Zw0nR054riNYtQ==", + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.4.0.tgz", + "integrity": "sha512-HNii132xfomg5QVZw0HwXXpN22s7VBHQBv9CeOu9tfJnhsWQNd2lmTNi8CSrnw5B+5YOmzu1UoPAyxaXsJ6RgQ==", "hasInstallScript": true, "engines": { "node": ">=6" } }, "node_modules/@fortawesome/fontawesome-svg-core": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.2.1.tgz", - "integrity": "sha512-HELwwbCz6C1XEcjzyT1Jugmz2NNklMrSPjZOWMlc+ZsHIVk+XOvOXLGGQtFBwSyqfJDNgRq4xBCwWOaZ/d9DEA==", + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.4.0.tgz", + "integrity": "sha512-Bertv8xOiVELz5raB2FlXDPKt+m94MQ3JgDfsVbrqNpLU9+UE2E18GKjLKw+d3XbeYPqg1pzyQKGsrzbw+pPaw==", "hasInstallScript": true, "dependencies": { - "@fortawesome/fontawesome-common-types": "6.2.1" + "@fortawesome/fontawesome-common-types": "6.4.0" }, "engines": { "node": ">=6" } }, - "node_modules/@fortawesome/fontawesome-svg-core/node_modules/@fortawesome/fontawesome-common-types": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.2.1.tgz", - "integrity": "sha512-Sz07mnQrTekFWLz5BMjOzHl/+NooTdW8F8kDQxjWwbpOJcnoSg4vUDng8d/WR1wOxM0O+CY9Zw0nR054riNYtQ==", - "hasInstallScript": true, - "engines": { - "node": ">=6" - } - }, "node_modules/@fortawesome/free-solid-svg-icons": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.2.1.tgz", - "integrity": "sha512-oKuqrP5jbfEPJWTij4sM+/RvgX+RMFwx3QZCZcK9PrBDgxC35zuc7AOFsyMjMd/PIFPeB2JxyqDr5zs/DZFPPw==", + 
"version": "6.4.0", + "resolved": "https://registry.npmjs.org/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.4.0.tgz", + "integrity": "sha512-kutPeRGWm8V5dltFP1zGjQOEAzaLZj4StdQhWVZnfGFCvAPVvHh8qk5bRrU4KXnRRRNni5tKQI9PBAdI6MP8nQ==", "hasInstallScript": true, "dependencies": { - "@fortawesome/fontawesome-common-types": "6.2.1" + "@fortawesome/fontawesome-common-types": "6.4.0" }, "engines": { "node": ">=6" @@ -3510,14 +3501,14 @@ "dev": true }, "node_modules/@lezer/common": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.0.2.tgz", - "integrity": "sha512-SVgiGtMnMnW3ActR8SXgsDhw7a0w0ChHSYAyAUxxrOiJ1OqYWEKk/xJd84tTSPo1mo6DXLObAJALNnd0Hrv7Ng==" + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.0.3.tgz", + "integrity": "sha512-JH4wAXCgUOcCGNekQPLhVeUtIqjH0yPBs7vvUdSjyQama9618IOKFJwkv2kcqdhF0my8hQEgCTEJU0GIgnahvA==" }, "node_modules/@lezer/generator": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.2.2.tgz", - "integrity": "sha512-O//eH9jTPM1GnbZruuD23xU68Pkuragonn1DEIom4Kt/eJN/QFt7Vzvp1YjV/XBmoUKC+2ySPgrA5fMF9FMM2g==", + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.2.3.tgz", + "integrity": "sha512-xRmNryYbJpWs7novjWtQLCGHOj71B4X1QHQ4SgJqwm11tl6COEVAGhuFTXKX16JMJUhumdXaX8We6hEMd4clDg==", "dev": true, "dependencies": { "@lezer/common": "^1.0.2", @@ -3528,17 +3519,17 @@ } }, "node_modules/@lezer/highlight": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.1.3.tgz", - "integrity": "sha512-3vLKLPThO4td43lYRBygmMY18JN3CPh9w+XS2j8WC30vR4yZeFG4z1iFe4jXE43NtGqe//zHW5q8ENLlHvz9gw==", + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.1.6.tgz", + "integrity": "sha512-cmSJYa2us+r3SePpRCjN5ymCqCPv+zyXmDl0ciWtVaNiORT/MxM7ZgOMQZADD0o51qOaOg24qc/zBViOIwAjJg==", "dependencies": { "@lezer/common": "^1.0.0" } 
}, "node_modules/@lezer/lr": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.1.tgz", - "integrity": "sha512-+GymJB/+3gThkk2zHwseaJTI5oa4AuOuj1I2LCslAVq1dFZLSX8SAe4ZlJq1TjezteDXtF/+d4qeWz9JvnrG9Q==", + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.6.tgz", + "integrity": "sha512-IDhcWjfxwWACnatUi0GzWBCbochfqxo3LZZlS27LbJh8RVYYXXyR5Ck9659IhkWkhSW/kZlaaiJpUO+YZTUK+Q==", "dependencies": { "@lezer/common": "^1.0.0" } @@ -4209,13 +4200,24 @@ } }, "node_modules/@types/enzyme": { - "version": "3.10.12", - "resolved": "https://registry.npmjs.org/@types/enzyme/-/enzyme-3.10.12.tgz", - "integrity": "sha512-xryQlOEIe1TduDWAOphR0ihfebKFSWOXpIsk+70JskCfRfW+xALdnJ0r1ZOTo85F9Qsjk6vtlU7edTYHbls9tA==", + "version": "3.10.13", + "resolved": "https://registry.npmjs.org/@types/enzyme/-/enzyme-3.10.13.tgz", + "integrity": "sha512-FCtoUhmFsud0Yx9fmZk179GkdZ4U9B0GFte64/Md+W/agx0L5SxsIIbhLBOxIb9y2UfBA4WQnaG1Od/UsUQs9Q==", "dev": true, "dependencies": { "@types/cheerio": "*", - "@types/react": "*" + "@types/react": "^16" + } + }, + "node_modules/@types/enzyme/node_modules/@types/react": { + "version": "16.14.42", + "resolved": "https://registry.npmjs.org/@types/react/-/react-16.14.42.tgz", + "integrity": "sha512-r6lbqQBJsQ5JJ0fp5I1+F3weosNhk7jOEcKeusIlCDYUK6kCpvIkYCamBNqGyS6WEztYlT8wmAVgblV0HxOFoA==", + "dev": true, + "dependencies": { + "@types/prop-types": "*", + "@types/scheduler": "*", + "csstype": "^3.0.2" } }, "node_modules/@types/eslint": { @@ -4330,9 +4332,9 @@ } }, "node_modules/@types/jest": { - "version": "29.4.0", - "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.4.0.tgz", - "integrity": "sha512-VaywcGQ9tPorCX/Jkkni7RWGFfI11whqzs8dvxF41P17Z+z872thvEvlIbznjPJ02kl1HMX3LmLOonsj2n7HeQ==", + "version": "29.5.2", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.2.tgz", + "integrity": "sha512-mSoZVJF5YzGVCk+FsDxzDuH7s+SCkzrgKZzf0Z0T2WudhBUPoF6ktoTPC4R0ZoCPCV5xUvuU6ias5NvxcBcMMg==", 
"dev": true, "dependencies": { "expect": "^29.0.0", @@ -4466,9 +4468,9 @@ } }, "node_modules/@types/react-dom": { - "version": "17.0.18", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.18.tgz", - "integrity": "sha512-rLVtIfbwyur2iFKykP2w0pl/1unw26b5td16d5xMgp7/yjTHomkyxPYChFoCr/FtEX1lN9wY6lFj1qvKdS5kDw==", + "version": "17.0.20", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.20.tgz", + "integrity": "sha512-4pzIjSxDueZZ90F52mU3aPoogkHIoSIDG+oQ+wQK7Cy2B9S+MvOqY0uEA/qawKz381qrEDkvpwyt8Bm31I8sbA==", "dev": true, "dependencies": { "@types/react": "^17" @@ -4520,9 +4522,9 @@ "dev": true }, "node_modules/@types/sanitize-html": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.8.0.tgz", - "integrity": "sha512-Uih6caOm3DsBYnVGOYn0A9NoTNe1c4aPStmHC/YA2JrpP9kx//jzaRcIklFvSpvVQEcpl/ZCr4DgISSf/YxTvg==", + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.9.0.tgz", + "integrity": "sha512-4fP/kEcKNj2u39IzrxWYuf/FnCCwwQCpif6wwY6ROUS1EPRIfWJjGkY3HIowY1EX/VbX5e86yq8AAE7UPMgATg==", "dev": true, "dependencies": { "htmlparser2": "^8.0.0" @@ -4573,9 +4575,9 @@ } }, "node_modules/@types/sinon": { - "version": "10.0.13", - "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-10.0.13.tgz", - "integrity": "sha512-UVjDqJblVNQYvVNUsj0PuYYw0ELRmgt1Nt5Vk0pT5f16ROGfcKJY8o1HVuMOJOpD727RrGB9EGvoaTQE5tgxZQ==", + "version": "10.0.15", + "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-10.0.15.tgz", + "integrity": "sha512-3lrFNQG0Kr2LDzvjyjB6AMJk4ge+8iYhQfdnSwIwlG88FUOV43kPcQqDZkDa/h3WSZy6i8Fr0BSjfQtB1B3xuQ==", "dev": true, "dependencies": { "@types/sinonjs__fake-timers": "*" @@ -7557,9 +7559,9 @@ "dev": true }, "node_modules/downshift": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/downshift/-/downshift-7.2.0.tgz", - "integrity": 
"sha512-dEn1Sshe7iTelUhmdbmiJhtIiwIBxBV8p15PuvEBh0qZcHXZnEt0geuCIIkCL4+ooaKRuLE0Wc+Fz9SwWuBIyg==", + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/downshift/-/downshift-7.6.0.tgz", + "integrity": "sha512-VSoTVynTAsabou/hbZ6HJHUVhtBiVOjQoBsCPcQq5eAROIGP+9XKMp9asAKQ3cEcUP4oe0fFdD2pziUjhFY33Q==", "dependencies": { "@babel/runtime": "^7.14.8", "compute-scroll-into-view": "^2.0.4", @@ -7984,9 +7986,9 @@ } }, "node_modules/eslint-config-prettier": { - "version": "8.6.0", - "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.6.0.tgz", - "integrity": "sha512-bAF0eLpLVqP5oEVUFKpMA+NnRFICwn9X8B5jrR9FcqnYBuPbqWEjTEspPWMj5ye6czoSLDweCzSo3Ko7gGrZaA==", + "version": "8.8.0", + "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.8.0.tgz", + "integrity": "sha512-wLbQiFre3tdGgpDv67NQKnJuTlcUVYHas3k+DZCc2U2BadthoEY4B7hLPvAxaqdyOGCzuLfii2fqGph10va7oA==", "dev": true, "bin": { "eslint-config-prettier": "bin/cli.js" @@ -10550,9 +10552,9 @@ } }, "node_modules/jest-canvas-mock": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/jest-canvas-mock/-/jest-canvas-mock-2.4.0.tgz", - "integrity": "sha512-mmMpZzpmLzn5vepIaHk5HoH3Ka4WykbSoLuG/EKoJd0x0ID/t+INo1l8ByfcUJuDM+RIsL4QDg/gDnBbrj2/IQ==", + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/jest-canvas-mock/-/jest-canvas-mock-2.5.1.tgz", + "integrity": "sha512-IVnRiz+v4EYn3ydM/pBo8GW/J+nU/Hg5gHBQQOUQhdRyNfvHnabB8ReqARLO0p+kvQghqr4V0tA92CF3JcUSRg==", "dev": true, "dependencies": { "cssfontparser": "^1.2.1", @@ -12836,9 +12838,9 @@ } }, "node_modules/jquery": { - "version": "3.6.3", - "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.6.3.tgz", - "integrity": "sha512-bZ5Sy3YzKo9Fyc8wH2iIQK4JImJ6R0GWI9kL1/k7Z91ZBNgkRXE6U0JfHIizZbort8ZunhSI3jw9I6253ahKfg==" + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.7.0.tgz", + "integrity": 
"sha512-umpJ0/k8X0MvD1ds0P9SfowREz2LenHsQaxSohMZ5OMNEU2r0tf8pdeEFTHMFxWVxKNyU9rTtK3CWzUCTKJUeQ==" }, "node_modules/jquery.flot.tooltip": { "version": "0.9.0", @@ -13488,11 +13490,11 @@ } }, "node_modules/moment-timezone": { - "version": "0.5.40", - "resolved": "https://registry.npmjs.org/moment-timezone/-/moment-timezone-0.5.40.tgz", - "integrity": "sha512-tWfmNkRYmBkPJz5mr9GVDn9vRlVZOTe6yqY92rFxiOdWXbjaR0+9LwQnZGGuNR63X456NqmEkbskte8tWL5ePg==", + "version": "0.5.43", + "resolved": "https://registry.npmjs.org/moment-timezone/-/moment-timezone-0.5.43.tgz", + "integrity": "sha512-72j3aNyuIsDxdF1i7CEgV2FfxM1r6aaqJyLB2vwb33mXYyoyLly+F1zbWqhA3/bVIoJ4szlUoMbUnVdid32NUQ==", "dependencies": { - "moment": ">= 2.9.0" + "moment": "^2.29.4" }, "engines": { "node": "*" @@ -13640,9 +13642,9 @@ } }, "node_modules/nock": { - "version": "13.3.0", - "resolved": "https://registry.npmjs.org/nock/-/nock-13.3.0.tgz", - "integrity": "sha512-HHqYQ6mBeiMc+N038w8LkMpDCRquCHWeNmN3v6645P3NhN2+qXOBqvPqo7Rt1VyCMzKhJ733wZqw5B7cQVFNPg==", + "version": "13.3.1", + "resolved": "https://registry.npmjs.org/nock/-/nock-13.3.1.tgz", + "integrity": "sha512-vHnopocZuI93p2ccivFyGuUfzjq2fxNyNurp7816mlT5V5HF4SzXu8lvLrVzBbNqzs+ODooZ6OksuSUNM7Njkw==", "dev": true, "dependencies": { "debug": "^4.1.0", @@ -15708,9 +15710,9 @@ } }, "node_modules/prettier": { - "version": "2.8.3", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.8.3.tgz", - "integrity": "sha512-tJ/oJ4amDihPoufT5sM0Z1SKEuKay8LfVAMlbbhnnkvt6BUserZylqo2PN+p9KeljLr0OHa2rXHU1T8reeoTrw==", + "version": "2.8.8", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.8.8.tgz", + "integrity": "sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q==", "dev": true, "bin": { "prettier": "bin-prettier.js" @@ -18022,9 +18024,9 @@ "dev": true }, "node_modules/sanitize-html": { - "version": "2.8.1", - "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.8.1.tgz", - "integrity": 
"sha512-qK5neD0SaMxGwVv5txOYv05huC3o6ZAA4h5+7nJJgWMNFUNRjcjLO6FpwAtKzfKCZ0jrG6xTk6eVFskbvOGblg==", + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.10.0.tgz", + "integrity": "sha512-JqdovUd81dG4k87vZt6uA6YhDfWkUGruUu/aPmXLxXi45gZExnt9Bnw/qeQU8oGf82vPyaE0vO4aH0PbobB9JQ==", "dependencies": { "deepmerge": "^4.2.2", "escape-string-regexp": "^4.0.0", @@ -18059,9 +18061,9 @@ "dev": true }, "node_modules/sass": { - "version": "1.57.1", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.57.1.tgz", - "integrity": "sha512-O2+LwLS79op7GI0xZ8fqzF7X2m/m8WFfI02dHOdsK5R2ECeS5F62zrwg/relM1rjSLy7Vd/DiMNIvPrQGsA0jw==", + "version": "1.62.1", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.62.1.tgz", + "integrity": "sha512-NHpxIzN29MXvWiuswfc1W3I0N8SXBd8UR26WntmDlRYf0bSADnwnOjsyMZ3lMezSlArD33Vs3YFhp7dWvL770A==", "dependencies": { "chokidar": ">=3.0.0 <4.0.0", "immutable": "^4.0.0", @@ -18071,7 +18073,7 @@ "sass": "sass.js" }, "engines": { - "node": ">=12.0.0" + "node": ">=14.0.0" } }, "node_modules/sass-loader": { @@ -19378,9 +19380,9 @@ "dev": true }, "node_modules/ts-jest": { - "version": "29.0.5", - "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.0.5.tgz", - "integrity": "sha512-PL3UciSgIpQ7f6XjVOmbi96vmDHUqAyqDr8YxzopDqX3kfgYtX1cuNeBjP+L9sFXi6nzsGGA6R3fP3DDDJyrxA==", + "version": "29.1.0", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.0.tgz", + "integrity": "sha512-ZhNr7Z4PcYa+JjMl62ir+zPiNJfXJN6E8hSLnaUKhOgqcn8vb3e537cpkd0FuAfRK3sR1LSqM1MOhliXNgOFPA==", "dev": true, "dependencies": { "bs-logger": "0.x", @@ -19403,7 +19405,7 @@ "@jest/types": "^29.0.0", "babel-jest": "^29.0.0", "jest": "^29.0.0", - "typescript": ">=4.3" + "typescript": ">=4.3 <6" }, "peerDependenciesMeta": { "@babel/core": { @@ -19572,9 +19574,9 @@ } }, "node_modules/typescript": { - "version": "4.9.4", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.4.tgz", - "integrity": 
"sha512-Uz+dTXYzxXXbsFpM86Wh3dKCxrQqUcVMxwU54orwlJjOpO3ao8L7j5lH+dWfTwgCwIuM9GQ2kvVotzYJMXTBZg==", + "version": "4.9.5", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.5.tgz", + "integrity": "sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==", "dev": true, "bin": { "tsc": "bin/tsc", @@ -20765,31 +20767,31 @@ "name": "@prometheus-io/app", "version": "0.44.0", "dependencies": { - "@codemirror/autocomplete": "^6.4.0", - "@codemirror/commands": "^6.2.0", - "@codemirror/language": "^6.4.0", - "@codemirror/lint": "^6.1.0", - "@codemirror/search": "^6.2.3", - "@codemirror/state": "^6.2.0", - "@codemirror/view": "^6.7.3", + "@codemirror/autocomplete": "^6.7.1", + "@codemirror/commands": "^6.2.4", + "@codemirror/language": "^6.7.0", + "@codemirror/lint": "^6.2.2", + "@codemirror/search": "^6.5.0", + "@codemirror/state": "^6.2.1", + "@codemirror/view": "^6.13.0", "@forevolve/bootstrap-dark": "^2.1.1", - "@fortawesome/fontawesome-svg-core": "6.2.1", - "@fortawesome/free-solid-svg-icons": "6.2.1", + "@fortawesome/fontawesome-svg-core": "6.4.0", + "@fortawesome/free-solid-svg-icons": "6.4.0", "@fortawesome/react-fontawesome": "0.2.0", - "@lezer/common": "^1.0.2", - "@lezer/highlight": "^1.1.3", - "@lezer/lr": "^1.3.1", + "@lezer/common": "^1.0.3", + "@lezer/highlight": "^1.1.6", + "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", "@prometheus-io/codemirror-promql": "0.44.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", - "downshift": "^7.2.0", + "downshift": "^7.6.0", "http-proxy-middleware": "^2.0.6", - "jquery": "^3.6.3", + "jquery": "^3.7.0", "jquery.flot.tooltip": "^0.9.0", "moment": "^2.29.4", - "moment-timezone": "^0.5.40", + "moment-timezone": "^0.5.43", "popper.js": "^1.14.3", "react": "^17.0.2", "react-copy-to-clipboard": "^5.1.0", @@ -20799,22 +20801,22 @@ "react-router-dom": "^5.3.4", "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", - "sanitize-html": 
"^2.8.1", - "sass": "1.57.1", + "sanitize-html": "^2.10.0", + "sass": "1.62.1", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" }, "devDependencies": { "@testing-library/react-hooks": "^7.0.2", - "@types/enzyme": "^3.10.12", + "@types/enzyme": "^3.10.13", "@types/flot": "0.0.32", "@types/jquery": "^3.5.16", - "@types/react": "^17.0.53", + "@types/react": "^17.0.60", "@types/react-copy-to-clipboard": "^5.0.4", - "@types/react-dom": "^17.0.18", + "@types/react-dom": "^17.0.20", "@types/react-router-dom": "^5.3.3", - "@types/sanitize-html": "^2.8.0", - "@types/sinon": "^10.0.13", + "@types/sanitize-html": "^2.9.0", + "@types/sinon": "^10.0.15", "@wojtekmaj/enzyme-adapter-react-17": "^0.8.0", "enzyme": "^3.11.0", "enzyme-to-json": "^3.6.2", @@ -20824,6 +20826,17 @@ "optionalDependencies": { "fsevents": "^2.3.2" } + }, + "react-app/node_modules/@types/react": { + "version": "17.0.60", + "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.60.tgz", + "integrity": "sha512-pCH7bqWIfzHs3D+PDs3O/COCQJka+Kcw3RnO9rFA2zalqoXg7cNjJDh6mZ7oRtY1wmY4LVwDdAbA1F7Z8tv3BQ==", + "dev": true, + "dependencies": { + "@types/prop-types": "*", + "@types/scheduler": "*", + "csstype": "^3.0.2" + } } }, "dependencies": { @@ -22224,9 +22237,9 @@ "dev": true }, "@codemirror/autocomplete": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.4.0.tgz", - "integrity": "sha512-HLF2PnZAm1s4kGs30EiqKMgD7XsYaQ0XJnMR0rofEWQ5t5D60SfqpDIkIh1ze5tiEbyUWm8+VJ6W1/erVvBMIA==", + "version": "6.7.1", + "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.7.1.tgz", + "integrity": "sha512-hSxf9S0uB+GV+gBsjY1FZNo53e1FFdzPceRfCfD1gWOnV6o21GfB5J5Wg9G/4h76XZMPrF0A6OCK/Rz5+V1egg==", "requires": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.0.0", @@ -22235,9 +22248,9 @@ } }, "@codemirror/commands": { - "version": "6.2.0", - "resolved": 
"https://registry.npmjs.org/@codemirror/commands/-/commands-6.2.0.tgz", - "integrity": "sha512-+00smmZBradoGFEkRjliN7BjqPh/Hx0KCHWOEibUmflUqZz2RwBTU0MrVovEEHozhx3AUSGcO/rl3/5f9e9Biw==", + "version": "6.2.4", + "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.2.4.tgz", + "integrity": "sha512-42lmDqVH0ttfilLShReLXsDfASKLXzfyC36bzwcqzox9PlHulMcsUOfHXNo2X2aFMVNUoQ7j+d4q5bnfseYoOA==", "requires": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.2.0", @@ -22246,9 +22259,9 @@ } }, "@codemirror/language": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.4.0.tgz", - "integrity": "sha512-Wzb7GnNj8vnEtbPWiOy9H0m1fBtE28kepQNGLXekU2EEZv43BF865VKITUn+NoV8OpW6gRtvm29YEhqm46927Q==", + "version": "6.7.0", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.7.0.tgz", + "integrity": "sha512-4SMwe6Fwn57klCUsVN0y4/h/iWT+XIXFEmop2lIHHuWO0ubjCrF3suqSZLyOQlznxkNnNbOOfKe5HQbQGCAmTg==", "requires": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -22259,9 +22272,9 @@ } }, "@codemirror/lint": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.1.0.tgz", - "integrity": "sha512-mdvDQrjRmYPvQ3WrzF6Ewaao+NWERYtpthJvoQ3tK3t/44Ynhk8ZGjTSL9jMEv8CgSMogmt75X8ceOZRDSXHtQ==", + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.2.2.tgz", + "integrity": "sha512-kHGuynBHjqinp1Bx25D2hgH8a6Fh1m9rSmZFzBVTqPIXDIcZ6j3VI67DY8USGYpGrjrJys9R52eLxtfERGNozg==", "requires": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -22269,9 +22282,9 @@ } }, "@codemirror/search": { - "version": "6.2.3", - "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.2.3.tgz", - "integrity": "sha512-V9n9233lopQhB1dyjsBK2Wc1i+8hcCqxl1wQ46c5HWWLePoe4FluV3TGHoZ04rBRlGjNyz9DTmpJErig8UE4jw==", + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.0.tgz", + 
"integrity": "sha512-64/M40YeJPToKvGO6p3fijo2vwUEj4nACEAXElCaYQ50HrXSvRaK+NHEhSh73WFBGdvIdhrV+lL9PdJy2RfCYA==", "requires": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -22279,14 +22292,14 @@ } }, "@codemirror/state": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.2.0.tgz", - "integrity": "sha512-69QXtcrsc3RYtOtd+GsvczJ319udtBf1PTrr2KbLWM/e2CXUPnh0Nz9AUo8WfhSQ7GeL8dPVNUmhQVgpmuaNGA==" + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.2.1.tgz", + "integrity": "sha512-RupHSZ8+OjNT38zU9fKH2sv+Dnlr8Eb8sl4NOnnqz95mCFTZUaiRP8Xv5MeeaG0px2b8Bnfe7YGwCV3nsBhbuw==" }, "@codemirror/view": { - "version": "6.7.3", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.7.3.tgz", - "integrity": "sha512-Lt+4POnhXrZFfHOdPzXEHxrzwdy7cjqYlMkOWvoFGi6/bAsjzlFfr0NY3B15B/PGx+cDFgM1hlc12wvYeZbGLw==", + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.13.0.tgz", + "integrity": "sha512-oXTfJzHJ5Tl7f6T8ZO0HKf981zubxgKohjddLobbntbNZHlOZGMRL+pPZGtclDWFaFJWtGBYRGyNdjQ6Xsx5yA==", "requires": { "@codemirror/state": "^6.1.4", "style-mod": "^4.0.0", @@ -22463,31 +22476,24 @@ } }, "@fortawesome/fontawesome-common-types": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.2.1.tgz", - "integrity": "sha512-Sz07mnQrTekFWLz5BMjOzHl/+NooTdW8F8kDQxjWwbpOJcnoSg4vUDng8d/WR1wOxM0O+CY9Zw0nR054riNYtQ==" + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.4.0.tgz", + "integrity": "sha512-HNii132xfomg5QVZw0HwXXpN22s7VBHQBv9CeOu9tfJnhsWQNd2lmTNi8CSrnw5B+5YOmzu1UoPAyxaXsJ6RgQ==" }, "@fortawesome/fontawesome-svg-core": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.2.1.tgz", - "integrity": 
"sha512-HELwwbCz6C1XEcjzyT1Jugmz2NNklMrSPjZOWMlc+ZsHIVk+XOvOXLGGQtFBwSyqfJDNgRq4xBCwWOaZ/d9DEA==", + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.4.0.tgz", + "integrity": "sha512-Bertv8xOiVELz5raB2FlXDPKt+m94MQ3JgDfsVbrqNpLU9+UE2E18GKjLKw+d3XbeYPqg1pzyQKGsrzbw+pPaw==", "requires": { - "@fortawesome/fontawesome-common-types": "6.2.1" - }, - "dependencies": { - "@fortawesome/fontawesome-common-types": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.2.1.tgz", - "integrity": "sha512-Sz07mnQrTekFWLz5BMjOzHl/+NooTdW8F8kDQxjWwbpOJcnoSg4vUDng8d/WR1wOxM0O+CY9Zw0nR054riNYtQ==" - } + "@fortawesome/fontawesome-common-types": "6.4.0" } }, "@fortawesome/free-solid-svg-icons": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.2.1.tgz", - "integrity": "sha512-oKuqrP5jbfEPJWTij4sM+/RvgX+RMFwx3QZCZcK9PrBDgxC35zuc7AOFsyMjMd/PIFPeB2JxyqDr5zs/DZFPPw==", + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.4.0.tgz", + "integrity": "sha512-kutPeRGWm8V5dltFP1zGjQOEAzaLZj4StdQhWVZnfGFCvAPVvHh8qk5bRrU4KXnRRRNni5tKQI9PBAdI6MP8nQ==", "requires": { - "@fortawesome/fontawesome-common-types": "6.2.1" + "@fortawesome/fontawesome-common-types": "6.4.0" } }, "@fortawesome/react-fontawesome": { @@ -23277,14 +23283,14 @@ "dev": true }, "@lezer/common": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.0.2.tgz", - "integrity": "sha512-SVgiGtMnMnW3ActR8SXgsDhw7a0w0ChHSYAyAUxxrOiJ1OqYWEKk/xJd84tTSPo1mo6DXLObAJALNnd0Hrv7Ng==" + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.0.3.tgz", + "integrity": "sha512-JH4wAXCgUOcCGNekQPLhVeUtIqjH0yPBs7vvUdSjyQama9618IOKFJwkv2kcqdhF0my8hQEgCTEJU0GIgnahvA==" }, "@lezer/generator": { - 
"version": "1.2.2", - "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.2.2.tgz", - "integrity": "sha512-O//eH9jTPM1GnbZruuD23xU68Pkuragonn1DEIom4Kt/eJN/QFt7Vzvp1YjV/XBmoUKC+2ySPgrA5fMF9FMM2g==", + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.2.3.tgz", + "integrity": "sha512-xRmNryYbJpWs7novjWtQLCGHOj71B4X1QHQ4SgJqwm11tl6COEVAGhuFTXKX16JMJUhumdXaX8We6hEMd4clDg==", "dev": true, "requires": { "@lezer/common": "^1.0.2", @@ -23292,17 +23298,17 @@ } }, "@lezer/highlight": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.1.3.tgz", - "integrity": "sha512-3vLKLPThO4td43lYRBygmMY18JN3CPh9w+XS2j8WC30vR4yZeFG4z1iFe4jXE43NtGqe//zHW5q8ENLlHvz9gw==", + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.1.6.tgz", + "integrity": "sha512-cmSJYa2us+r3SePpRCjN5ymCqCPv+zyXmDl0ciWtVaNiORT/MxM7ZgOMQZADD0o51qOaOg24qc/zBViOIwAjJg==", "requires": { "@lezer/common": "^1.0.0" } }, "@lezer/lr": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.1.tgz", - "integrity": "sha512-+GymJB/+3gThkk2zHwseaJTI5oa4AuOuj1I2LCslAVq1dFZLSX8SAe4ZlJq1TjezteDXtF/+d4qeWz9JvnrG9Q==", + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.6.tgz", + "integrity": "sha512-IDhcWjfxwWACnatUi0GzWBCbochfqxo3LZZlS27LbJh8RVYYXXyR5Ck9659IhkWkhSW/kZlaaiJpUO+YZTUK+Q==", "requires": { "@lezer/common": "^1.0.0" } @@ -23401,45 +23407,45 @@ "@prometheus-io/app": { "version": "file:react-app", "requires": { - "@codemirror/autocomplete": "^6.4.0", - "@codemirror/commands": "^6.2.0", - "@codemirror/language": "^6.4.0", - "@codemirror/lint": "^6.1.0", - "@codemirror/search": "^6.2.3", - "@codemirror/state": "^6.2.0", - "@codemirror/view": "^6.7.3", + "@codemirror/autocomplete": "^6.7.1", + "@codemirror/commands": "^6.2.4", + "@codemirror/language": "^6.7.0", + "@codemirror/lint": "^6.2.2", + "@codemirror/search": 
"^6.5.0", + "@codemirror/state": "^6.2.1", + "@codemirror/view": "^6.13.0", "@forevolve/bootstrap-dark": "^2.1.1", - "@fortawesome/fontawesome-svg-core": "6.2.1", - "@fortawesome/free-solid-svg-icons": "6.2.1", + "@fortawesome/fontawesome-svg-core": "6.4.0", + "@fortawesome/free-solid-svg-icons": "6.4.0", "@fortawesome/react-fontawesome": "0.2.0", - "@lezer/common": "^1.0.2", - "@lezer/highlight": "^1.1.3", - "@lezer/lr": "^1.3.1", + "@lezer/common": "^1.0.3", + "@lezer/highlight": "^1.1.6", + "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", "@prometheus-io/codemirror-promql": "0.44.0", "@testing-library/react-hooks": "^7.0.2", - "@types/enzyme": "^3.10.12", + "@types/enzyme": "^3.10.13", "@types/flot": "0.0.32", "@types/jquery": "^3.5.16", - "@types/react": "^17.0.53", + "@types/react": "^17.0.60", "@types/react-copy-to-clipboard": "^5.0.4", - "@types/react-dom": "^17.0.18", + "@types/react-dom": "^17.0.20", "@types/react-router-dom": "^5.3.3", - "@types/sanitize-html": "^2.8.0", - "@types/sinon": "^10.0.13", + "@types/sanitize-html": "^2.9.0", + "@types/sinon": "^10.0.15", "@wojtekmaj/enzyme-adapter-react-17": "^0.8.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", - "downshift": "^7.2.0", + "downshift": "^7.6.0", "enzyme": "^3.11.0", "enzyme-to-json": "^3.6.2", "fsevents": "^2.3.2", "http-proxy-middleware": "^2.0.6", - "jquery": "^3.6.3", + "jquery": "^3.7.0", "jquery.flot.tooltip": "^0.9.0", "moment": "^2.29.4", - "moment-timezone": "^0.5.40", + "moment-timezone": "^0.5.43", "mutationobserver-shim": "^0.3.7", "popper.js": "^1.14.3", "react": "^17.0.2", @@ -23450,37 +23456,50 @@ "react-router-dom": "^5.3.4", "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", - "sanitize-html": "^2.8.1", - "sass": "1.57.1", + "sanitize-html": "^2.10.0", + "sass": "1.62.1", "sinon": "^14.0.2", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" + }, + "dependencies": { + "@types/react": { + "version": "17.0.60", + 
"resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.60.tgz", + "integrity": "sha512-pCH7bqWIfzHs3D+PDs3O/COCQJka+Kcw3RnO9rFA2zalqoXg7cNjJDh6mZ7oRtY1wmY4LVwDdAbA1F7Z8tv3BQ==", + "dev": true, + "requires": { + "@types/prop-types": "*", + "@types/scheduler": "*", + "csstype": "^3.0.2" + } + } } }, "@prometheus-io/codemirror-promql": { "version": "file:module/codemirror-promql", "requires": { - "@codemirror/autocomplete": "^6.4.0", - "@codemirror/language": "^6.4.0", - "@codemirror/lint": "^6.1.0", - "@codemirror/state": "^6.2.0", - "@codemirror/view": "^6.7.3", - "@lezer/common": "^1.0.2", - "@lezer/highlight": "^1.1.3", - "@lezer/lr": "^1.3.1", + "@codemirror/autocomplete": "^6.7.1", + "@codemirror/language": "^6.7.0", + "@codemirror/lint": "^6.2.2", + "@codemirror/state": "^6.2.1", + "@codemirror/view": "^6.13.0", + "@lezer/common": "^1.0.3", + "@lezer/highlight": "^1.1.6", + "@lezer/lr": "^1.3.6", "@prometheus-io/lezer-promql": "0.44.0", "@types/lru-cache": "^5.1.1", "isomorphic-fetch": "^3.0.0", "lru-cache": "^6.0.0", - "nock": "^13.3.0" + "nock": "^13.3.1" } }, "@prometheus-io/lezer-promql": { "version": "file:module/lezer-promql", "requires": { - "@lezer/generator": "^1.2.2", - "@lezer/highlight": "^1.1.3", - "@lezer/lr": "^1.3.1" + "@lezer/generator": "^1.2.3", + "@lezer/highlight": "^1.1.6", + "@lezer/lr": "^1.3.6" } }, "@rollup/plugin-babel": { @@ -23851,13 +23870,26 @@ } }, "@types/enzyme": { - "version": "3.10.12", - "resolved": "https://registry.npmjs.org/@types/enzyme/-/enzyme-3.10.12.tgz", - "integrity": "sha512-xryQlOEIe1TduDWAOphR0ihfebKFSWOXpIsk+70JskCfRfW+xALdnJ0r1ZOTo85F9Qsjk6vtlU7edTYHbls9tA==", + "version": "3.10.13", + "resolved": "https://registry.npmjs.org/@types/enzyme/-/enzyme-3.10.13.tgz", + "integrity": "sha512-FCtoUhmFsud0Yx9fmZk179GkdZ4U9B0GFte64/Md+W/agx0L5SxsIIbhLBOxIb9y2UfBA4WQnaG1Od/UsUQs9Q==", "dev": true, "requires": { "@types/cheerio": "*", - "@types/react": "*" + "@types/react": "^16" + }, + "dependencies": { + 
"@types/react": { + "version": "16.14.42", + "resolved": "https://registry.npmjs.org/@types/react/-/react-16.14.42.tgz", + "integrity": "sha512-r6lbqQBJsQ5JJ0fp5I1+F3weosNhk7jOEcKeusIlCDYUK6kCpvIkYCamBNqGyS6WEztYlT8wmAVgblV0HxOFoA==", + "dev": true, + "requires": { + "@types/prop-types": "*", + "@types/scheduler": "*", + "csstype": "^3.0.2" + } + } } }, "@types/eslint": { @@ -23972,9 +24004,9 @@ } }, "@types/jest": { - "version": "29.4.0", - "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.4.0.tgz", - "integrity": "sha512-VaywcGQ9tPorCX/Jkkni7RWGFfI11whqzs8dvxF41P17Z+z872thvEvlIbznjPJ02kl1HMX3LmLOonsj2n7HeQ==", + "version": "29.5.2", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.2.tgz", + "integrity": "sha512-mSoZVJF5YzGVCk+FsDxzDuH7s+SCkzrgKZzf0Z0T2WudhBUPoF6ktoTPC4R0ZoCPCV5xUvuU6ias5NvxcBcMMg==", "dev": true, "requires": { "expect": "^29.0.0", @@ -24101,9 +24133,9 @@ } }, "@types/react-dom": { - "version": "17.0.18", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.18.tgz", - "integrity": "sha512-rLVtIfbwyur2iFKykP2w0pl/1unw26b5td16d5xMgp7/yjTHomkyxPYChFoCr/FtEX1lN9wY6lFj1qvKdS5kDw==", + "version": "17.0.20", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.20.tgz", + "integrity": "sha512-4pzIjSxDueZZ90F52mU3aPoogkHIoSIDG+oQ+wQK7Cy2B9S+MvOqY0uEA/qawKz381qrEDkvpwyt8Bm31I8sbA==", "dev": true, "requires": { "@types/react": "^17" @@ -24155,9 +24187,9 @@ "dev": true }, "@types/sanitize-html": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.8.0.tgz", - "integrity": "sha512-Uih6caOm3DsBYnVGOYn0A9NoTNe1c4aPStmHC/YA2JrpP9kx//jzaRcIklFvSpvVQEcpl/ZCr4DgISSf/YxTvg==", + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.9.0.tgz", + "integrity": "sha512-4fP/kEcKNj2u39IzrxWYuf/FnCCwwQCpif6wwY6ROUS1EPRIfWJjGkY3HIowY1EX/VbX5e86yq8AAE7UPMgATg==", "dev": true, "requires": { "htmlparser2": 
"^8.0.0" @@ -24203,9 +24235,9 @@ } }, "@types/sinon": { - "version": "10.0.13", - "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-10.0.13.tgz", - "integrity": "sha512-UVjDqJblVNQYvVNUsj0PuYYw0ELRmgt1Nt5Vk0pT5f16ROGfcKJY8o1HVuMOJOpD727RrGB9EGvoaTQE5tgxZQ==", + "version": "10.0.15", + "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-10.0.15.tgz", + "integrity": "sha512-3lrFNQG0Kr2LDzvjyjB6AMJk4ge+8iYhQfdnSwIwlG88FUOV43kPcQqDZkDa/h3WSZy6i8Fr0BSjfQtB1B3xuQ==", "dev": true, "requires": { "@types/sinonjs__fake-timers": "*" @@ -26441,9 +26473,9 @@ "dev": true }, "downshift": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/downshift/-/downshift-7.2.0.tgz", - "integrity": "sha512-dEn1Sshe7iTelUhmdbmiJhtIiwIBxBV8p15PuvEBh0qZcHXZnEt0geuCIIkCL4+ooaKRuLE0Wc+Fz9SwWuBIyg==", + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/downshift/-/downshift-7.6.0.tgz", + "integrity": "sha512-VSoTVynTAsabou/hbZ6HJHUVhtBiVOjQoBsCPcQq5eAROIGP+9XKMp9asAKQ3cEcUP4oe0fFdD2pziUjhFY33Q==", "requires": { "@babel/runtime": "^7.14.8", "compute-scroll-into-view": "^2.0.4", @@ -26779,9 +26811,9 @@ } }, "eslint-config-prettier": { - "version": "8.6.0", - "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.6.0.tgz", - "integrity": "sha512-bAF0eLpLVqP5oEVUFKpMA+NnRFICwn9X8B5jrR9FcqnYBuPbqWEjTEspPWMj5ye6czoSLDweCzSo3Ko7gGrZaA==", + "version": "8.8.0", + "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.8.0.tgz", + "integrity": "sha512-wLbQiFre3tdGgpDv67NQKnJuTlcUVYHas3k+DZCc2U2BadthoEY4B7hLPvAxaqdyOGCzuLfii2fqGph10va7oA==", "dev": true, "requires": {} }, @@ -28650,9 +28682,9 @@ } }, "jest-canvas-mock": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/jest-canvas-mock/-/jest-canvas-mock-2.4.0.tgz", - "integrity": "sha512-mmMpZzpmLzn5vepIaHk5HoH3Ka4WykbSoLuG/EKoJd0x0ID/t+INo1l8ByfcUJuDM+RIsL4QDg/gDnBbrj2/IQ==", + "version": "2.5.1", + "resolved": 
"https://registry.npmjs.org/jest-canvas-mock/-/jest-canvas-mock-2.5.1.tgz", + "integrity": "sha512-IVnRiz+v4EYn3ydM/pBo8GW/J+nU/Hg5gHBQQOUQhdRyNfvHnabB8ReqARLO0p+kvQghqr4V0tA92CF3JcUSRg==", "dev": true, "requires": { "cssfontparser": "^1.2.1", @@ -30498,9 +30530,9 @@ } }, "jquery": { - "version": "3.6.3", - "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.6.3.tgz", - "integrity": "sha512-bZ5Sy3YzKo9Fyc8wH2iIQK4JImJ6R0GWI9kL1/k7Z91ZBNgkRXE6U0JfHIizZbort8ZunhSI3jw9I6253ahKfg==" + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.7.0.tgz", + "integrity": "sha512-umpJ0/k8X0MvD1ds0P9SfowREz2LenHsQaxSohMZ5OMNEU2r0tf8pdeEFTHMFxWVxKNyU9rTtK3CWzUCTKJUeQ==" }, "jquery.flot.tooltip": { "version": "0.9.0", @@ -31015,11 +31047,11 @@ "integrity": "sha512-5LC9SOxjSc2HF6vO2CyuTDNivEdoz2IvyJJGj6X8DJ0eFyfszE0QiEd+iXmBvUP3WHxSjFH/vIsA0EN00cgr8w==" }, "moment-timezone": { - "version": "0.5.40", - "resolved": "https://registry.npmjs.org/moment-timezone/-/moment-timezone-0.5.40.tgz", - "integrity": "sha512-tWfmNkRYmBkPJz5mr9GVDn9vRlVZOTe6yqY92rFxiOdWXbjaR0+9LwQnZGGuNR63X456NqmEkbskte8tWL5ePg==", + "version": "0.5.43", + "resolved": "https://registry.npmjs.org/moment-timezone/-/moment-timezone-0.5.43.tgz", + "integrity": "sha512-72j3aNyuIsDxdF1i7CEgV2FfxM1r6aaqJyLB2vwb33mXYyoyLly+F1zbWqhA3/bVIoJ4szlUoMbUnVdid32NUQ==", "requires": { - "moment": ">= 2.9.0" + "moment": "^2.29.4" } }, "moo": { @@ -31146,9 +31178,9 @@ } }, "nock": { - "version": "13.3.0", - "resolved": "https://registry.npmjs.org/nock/-/nock-13.3.0.tgz", - "integrity": "sha512-HHqYQ6mBeiMc+N038w8LkMpDCRquCHWeNmN3v6645P3NhN2+qXOBqvPqo7Rt1VyCMzKhJ733wZqw5B7cQVFNPg==", + "version": "13.3.1", + "resolved": "https://registry.npmjs.org/nock/-/nock-13.3.1.tgz", + "integrity": "sha512-vHnopocZuI93p2ccivFyGuUfzjq2fxNyNurp7816mlT5V5HF4SzXu8lvLrVzBbNqzs+ODooZ6OksuSUNM7Njkw==", "dev": true, "requires": { "debug": "^4.1.0", @@ -32479,9 +32511,9 @@ "dev": true }, "prettier": { - "version": "2.8.3", - 
"resolved": "https://registry.npmjs.org/prettier/-/prettier-2.8.3.tgz", - "integrity": "sha512-tJ/oJ4amDihPoufT5sM0Z1SKEuKay8LfVAMlbbhnnkvt6BUserZylqo2PN+p9KeljLr0OHa2rXHU1T8reeoTrw==", + "version": "2.8.8", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.8.8.tgz", + "integrity": "sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q==", "dev": true }, "prettier-linter-helpers": { @@ -34265,9 +34297,9 @@ "dev": true }, "sanitize-html": { - "version": "2.8.1", - "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.8.1.tgz", - "integrity": "sha512-qK5neD0SaMxGwVv5txOYv05huC3o6ZAA4h5+7nJJgWMNFUNRjcjLO6FpwAtKzfKCZ0jrG6xTk6eVFskbvOGblg==", + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.10.0.tgz", + "integrity": "sha512-JqdovUd81dG4k87vZt6uA6YhDfWkUGruUu/aPmXLxXi45gZExnt9Bnw/qeQU8oGf82vPyaE0vO4aH0PbobB9JQ==", "requires": { "deepmerge": "^4.2.2", "escape-string-regexp": "^4.0.0", @@ -34297,9 +34329,9 @@ "dev": true }, "sass": { - "version": "1.57.1", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.57.1.tgz", - "integrity": "sha512-O2+LwLS79op7GI0xZ8fqzF7X2m/m8WFfI02dHOdsK5R2ECeS5F62zrwg/relM1rjSLy7Vd/DiMNIvPrQGsA0jw==", + "version": "1.62.1", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.62.1.tgz", + "integrity": "sha512-NHpxIzN29MXvWiuswfc1W3I0N8SXBd8UR26WntmDlRYf0bSADnwnOjsyMZ3lMezSlArD33Vs3YFhp7dWvL770A==", "requires": { "chokidar": ">=3.0.0 <4.0.0", "immutable": "^4.0.0", @@ -35312,9 +35344,9 @@ "dev": true }, "ts-jest": { - "version": "29.0.5", - "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.0.5.tgz", - "integrity": "sha512-PL3UciSgIpQ7f6XjVOmbi96vmDHUqAyqDr8YxzopDqX3kfgYtX1cuNeBjP+L9sFXi6nzsGGA6R3fP3DDDJyrxA==", + "version": "29.1.0", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.0.tgz", + "integrity": 
"sha512-ZhNr7Z4PcYa+JjMl62ir+zPiNJfXJN6E8hSLnaUKhOgqcn8vb3e537cpkd0FuAfRK3sR1LSqM1MOhliXNgOFPA==", "dev": true, "requires": { "bs-logger": "0.x", @@ -35449,9 +35481,9 @@ } }, "typescript": { - "version": "4.9.4", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.4.tgz", - "integrity": "sha512-Uz+dTXYzxXXbsFpM86Wh3dKCxrQqUcVMxwU54orwlJjOpO3ao8L7j5lH+dWfTwgCwIuM9GQ2kvVotzYJMXTBZg==", + "version": "4.9.5", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.5.tgz", + "integrity": "sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==", "dev": true }, "unbox-primitive": { diff --git a/web/ui/package.json b/web/ui/package.json index 22f2615ff..d5663ee75 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -16,16 +16,16 @@ "npm": ">=7.0.0" }, "devDependencies": { - "@types/jest": "^29.4.0", + "@types/jest": "^29.5.2", "@types/node": "^17.0.45", - "eslint-config-prettier": "^8.6.0", + "eslint-config-prettier": "^8.8.0", "eslint-config-react-app": "^7.0.1", "eslint-plugin-prettier": "^4.2.1", - "jest-canvas-mock": "^2.4.0", + "jest-canvas-mock": "^2.5.1", "jest-fetch-mock": "^3.0.3", "react-scripts": "^5.0.1", - "prettier": "^2.8.3", - "ts-jest": "^29.0.5", - "typescript": "^4.9.4" + "prettier": "^2.8.8", + "ts-jest": "^29.1.0", + "typescript": "^4.9.5" } } diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index fc0d1e5fc..d67209d8e 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -3,31 +3,31 @@ "version": "0.44.0", "private": true, "dependencies": { - "@codemirror/autocomplete": "^6.4.0", - "@codemirror/commands": "^6.2.0", - "@codemirror/language": "^6.4.0", - "@codemirror/lint": "^6.1.0", - "@codemirror/search": "^6.2.3", - "@codemirror/state": "^6.2.0", - "@codemirror/view": "^6.7.3", + "@codemirror/autocomplete": "^6.7.1", + "@codemirror/commands": "^6.2.4", + "@codemirror/language": "^6.7.0", + "@codemirror/lint": 
"^6.2.2", + "@codemirror/search": "^6.5.0", + "@codemirror/state": "^6.2.1", + "@codemirror/view": "^6.13.0", "@forevolve/bootstrap-dark": "^2.1.1", - "@fortawesome/fontawesome-svg-core": "6.2.1", - "@fortawesome/free-solid-svg-icons": "6.2.1", + "@fortawesome/fontawesome-svg-core": "6.4.0", + "@fortawesome/free-solid-svg-icons": "6.4.0", "@fortawesome/react-fontawesome": "0.2.0", - "@lezer/lr": "^1.3.1", - "@lezer/highlight": "^1.1.3", - "@lezer/common": "^1.0.2", + "@lezer/lr": "^1.3.6", + "@lezer/highlight": "^1.1.6", + "@lezer/common": "^1.0.3", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", "@prometheus-io/codemirror-promql": "0.44.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", - "downshift": "^7.2.0", + "downshift": "^7.6.0", "http-proxy-middleware": "^2.0.6", - "jquery": "^3.6.3", + "jquery": "^3.7.0", "jquery.flot.tooltip": "^0.9.0", "moment": "^2.29.4", - "moment-timezone": "^0.5.40", + "moment-timezone": "^0.5.43", "popper.js": "^1.14.3", "react": "^17.0.2", "react-copy-to-clipboard": "^5.1.0", @@ -37,8 +37,8 @@ "react-router-dom": "^5.3.4", "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", - "sanitize-html": "^2.8.1", - "sass": "1.57.1", + "sanitize-html": "^2.10.0", + "sass": "1.62.1", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" }, @@ -66,15 +66,15 @@ ], "devDependencies": { "@testing-library/react-hooks": "^7.0.2", - "@types/enzyme": "^3.10.12", + "@types/enzyme": "^3.10.13", "@types/flot": "0.0.32", "@types/jquery": "^3.5.16", - "@types/react": "^17.0.53", + "@types/react": "^17.0.60", "@types/react-copy-to-clipboard": "^5.0.4", - "@types/react-dom": "^17.0.18", + "@types/react-dom": "^17.0.20", "@types/react-router-dom": "^5.3.3", - "@types/sanitize-html": "^2.8.0", - "@types/sinon": "^10.0.13", + "@types/sanitize-html": "^2.9.0", + "@types/sinon": "^10.0.15", "@wojtekmaj/enzyme-adapter-react-17": "^0.8.0", "enzyme": "^3.11.0", "enzyme-to-json": "^3.6.2", diff --git a/web/web.go b/web/web.go index 
27378b3b8..b9af2819b 100644 --- a/web/web.go +++ b/web/web.go @@ -744,7 +744,7 @@ func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) { } func toFloat64(f *io_prometheus_client.MetricFamily) float64 { - m := *f.Metric[0] + m := f.Metric[0] if m.Gauge != nil { return m.Gauge.GetValue() } From 4268feb9d7b6be75359e9fe0dcf765cf73fedd17 Mon Sep 17 00:00:00 2001 From: Leo Q Date: Wed, 7 Jun 2023 20:28:13 +0800 Subject: [PATCH 187/632] add alert for sd refresh failure (#12410) * add alert for sd refresh failure Due to config error or sd service down, prometheus may fail to refresh sd resource, which may lead to scrape fail or irrelavant metrics. Signed-off-by: Leo Q * apply suggestions Signed-off-by: Leo Q --------- Signed-off-by: Leo Q --- documentation/prometheus-mixin/alerts.libsonnet | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/documentation/prometheus-mixin/alerts.libsonnet b/documentation/prometheus-mixin/alerts.libsonnet index 0ee5d83c7..3efb0f27d 100644 --- a/documentation/prometheus-mixin/alerts.libsonnet +++ b/documentation/prometheus-mixin/alerts.libsonnet @@ -20,6 +20,20 @@ description: 'Prometheus %(prometheusName)s has failed to reload its configuration.' % $._config, }, }, + { + alert: 'PrometheusSDRefreshFailure', + expr: ||| + increase(prometheus_sd_refresh_failures_total{%(prometheusSelector)s}[10m]) > 0 + ||| % $._config, + 'for': '20m', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Failed Prometheus SD refresh.', + description: 'Prometheus %(prometheusName)s has failed to refresh SD with mechanism {{$labels.mechanism}}.' 
% $._config, + }, + }, { alert: 'PrometheusNotificationQueueRunningFull', expr: ||| From edfc97a77e74d0f3996fed16c8a1b29c2b10ed35 Mon Sep 17 00:00:00 2001 From: Jesus Vazquez Date: Wed, 7 Jun 2023 16:00:15 +0200 Subject: [PATCH 188/632] Bump UI version (#12440) Signed-off-by: Jesus Vazquez --- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/react-app/package.json | 4 ++-- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index e9c952c9c..6c404a6c4 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.44.0", + "version": "0.45.0-rc.0", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.44.0", + "@prometheus-io/lezer-promql": "0.45.0-rc.0", "lru-cache": "^6.0.0" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 08249b418..389eb88f4 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.44.0", + "version": "0.45.0-rc.0", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index a672a1767..f80d2fd6e 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -28,10 +28,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.44.0", + 
"version": "0.45.0-rc.0", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.44.0", + "@prometheus-io/lezer-promql": "0.45.0-rc.0", "lru-cache": "^6.0.0" }, "devDependencies": { @@ -61,7 +61,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.44.0", + "version": "0.45.0-rc.0", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.2.3", @@ -20765,7 +20765,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.44.0", + "version": "0.45.0-rc.0", "dependencies": { "@codemirror/autocomplete": "^6.7.1", "@codemirror/commands": "^6.2.4", @@ -20783,7 +20783,7 @@ "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.44.0", + "@prometheus-io/codemirror-promql": "0.45.0-rc.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.0", @@ -23423,7 +23423,7 @@ "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.44.0", + "@prometheus-io/codemirror-promql": "0.45.0-rc.0", "@testing-library/react-hooks": "^7.0.2", "@types/enzyme": "^3.10.13", "@types/flot": "0.0.32", @@ -23487,7 +23487,7 @@ "@lezer/common": "^1.0.3", "@lezer/highlight": "^1.1.6", "@lezer/lr": "^1.3.6", - "@prometheus-io/lezer-promql": "0.44.0", + "@prometheus-io/lezer-promql": "0.45.0-rc.0", "@types/lru-cache": "^5.1.1", "isomorphic-fetch": "^3.0.0", "lru-cache": "^6.0.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index d67209d8e..d8efd5681 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.44.0", + "version": "0.45.0-rc.0", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.7.1", @@ -19,7 +19,7 @@ "@lezer/common": "^1.0.3", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.44.0", + 
"@prometheus-io/codemirror-promql": "0.45.0-rc.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.0", From 344c8ff97ce261dbaaf2720f1e5164a8fee19184 Mon Sep 17 00:00:00 2001 From: Michael Hoffmann Date: Wed, 7 Jun 2023 22:54:30 +0200 Subject: [PATCH 189/632] feat: dont compile regex matcher if we know its a literal (#12434) labels: dont compile regex matcher if we know its a literal Signed-off-by: Michael Hoffmann Co-authored-by: Sharad --- model/labels/matcher_test.go | 18 ++++++++++++++---- model/labels/regexp.go | 17 +++++++++++++++++ 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/model/labels/matcher_test.go b/model/labels/matcher_test.go index 14615a50d..d26e9329f 100644 --- a/model/labels/matcher_test.go +++ b/model/labels/matcher_test.go @@ -102,12 +102,12 @@ func TestInverse(t *testing.T) { expected: &Matcher{Type: MatchEqual, Name: "name2", Value: "value2"}, }, { - matcher: &Matcher{Type: MatchRegexp, Name: "name3", Value: "value3"}, - expected: &Matcher{Type: MatchNotRegexp, Name: "name3", Value: "value3"}, + matcher: &Matcher{Type: MatchRegexp, Name: "name3", Value: "value3.*"}, + expected: &Matcher{Type: MatchNotRegexp, Name: "name3", Value: "value3.*"}, }, { - matcher: &Matcher{Type: MatchNotRegexp, Name: "name4", Value: "value4"}, - expected: &Matcher{Type: MatchRegexp, Name: "name4", Value: "value4"}, + matcher: &Matcher{Type: MatchNotRegexp, Name: "name4", Value: "value4.*"}, + expected: &Matcher{Type: MatchRegexp, Name: "name4", Value: "value4.*"}, }, } @@ -123,3 +123,13 @@ func BenchmarkMatchType_String(b *testing.B) { _ = MatchType(i % int(MatchNotRegexp+1)).String() } } + +func BenchmarkNewMatcher(b *testing.B) { + b.Run("regex matcher with literal", func(b *testing.B) { + b.ReportAllocs() + b.ResetTimer() + for i := 0; i <= b.N; i++ { + NewMatcher(MatchRegexp, "foo", "bar") + } + }) +} diff --git a/model/labels/regexp.go b/model/labels/regexp.go index e09a63772..14319c7f7 100644 --- a/model/labels/regexp.go +++ 
b/model/labels/regexp.go @@ -25,9 +25,16 @@ type FastRegexMatcher struct { prefix string suffix string contains string + + // shortcut for literals + literal bool + value string } func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { + if isLiteral(v) { + return &FastRegexMatcher{literal: true, value: v}, nil + } re, err := regexp.Compile("^(?:" + v + ")$") if err != nil { return nil, err @@ -50,6 +57,9 @@ func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { } func (m *FastRegexMatcher) MatchString(s string) bool { + if m.literal { + return s == m.value + } if m.prefix != "" && !strings.HasPrefix(s, m.prefix) { return false } @@ -63,9 +73,16 @@ func (m *FastRegexMatcher) MatchString(s string) bool { } func (m *FastRegexMatcher) GetRegexString() string { + if m.literal { + return m.value + } return m.re.String() } +func isLiteral(re string) bool { + return regexp.QuoteMeta(re) == re +} + // optimizeConcatRegex returns literal prefix/suffix text that can be safely // checked against the label value before running the regexp matcher. func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) { From f7abe27fef92adc3239c7c3e4006f570b56e71b8 Mon Sep 17 00:00:00 2001 From: sinkingpoint Date: Fri, 9 Jun 2023 14:06:09 +1000 Subject: [PATCH 190/632] Remove trailing commas from Exemplar API docs The trailing commas here make this example invalid JSON. Here we remove them. 
Signed-off-by: sinkingpoint --- docs/querying/api.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index edce366ee..b2b72d9cd 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -407,7 +407,7 @@ $ curl -g 'http://localhost:9090/api/v1/query_exemplars?query=test_exemplar_metr "traceID": "EpTxMJ40fUus7aGY" }, "value": "6", - "timestamp": 1600096945.479, + "timestamp": 1600096945.479 } ] }, @@ -424,15 +424,15 @@ $ curl -g 'http://localhost:9090/api/v1/query_exemplars?query=test_exemplar_metr "traceID": "Olp9XHlq763ccsfa" }, "value": "19", - "timestamp": 1600096955.479, + "timestamp": 1600096955.479 }, { "labels": { "traceID": "hCtjygkIHwAN9vs4" }, "value": "20", - "timestamp": 1600096965.489, - }, + "timestamp": 1600096965.489 + } ] } ] From 1ea477f4bcd04c580e51711dca346a06421b311c Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Mon, 12 Jun 2023 12:17:20 -0300 Subject: [PATCH 191/632] Add feature flag to squash metadata from /api/v1/metadata (#12391) Signed-off-by: ArthurSens --- docs/querying/api.md | 27 +++++++ web/api/v1/api.go | 16 ++++- web/api/v1/api_test.go | 160 ++++++++++++++++++++++++++++++++++++++--- 3 files changed, 193 insertions(+), 10 deletions(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index b2b72d9cd..ca7f64f62 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -863,6 +863,7 @@ GET /api/v1/metadata URL query parameters: - `limit=`: Maximum number of metrics to return. +- `limit_per_metric=`: Maximum number of metadata to return per metric. - `metric=`: A metric name to filter metadata for. All metric metadata is retrieved if left empty. The `data` section of the query result consists of an object where each key is a metric name and each value is a list of unique metadata objects, as exposed for that metric name across all targets. 
@@ -898,6 +899,32 @@ curl -G http://localhost:9090/api/v1/metadata?limit=2 } ``` +The following example returns only one metadata entry for each metric. + +```json +curl -G http://localhost:9090/api/v1/metadata?limit_per_metric=1 + +{ + "status": "success", + "data": { + "cortex_ring_tokens": [ + { + "type": "gauge", + "help": "Number of tokens in the ring", + "unit": "" + } + ], + "http_requests_total": [ + { + "type": "counter", + "help": "Number of HTTP requests", + "unit": "" + } + ] + } +} +``` + The following example returns metadata only for the metric `http_requests_total`. ```json diff --git a/web/api/v1/api.go b/web/api/v1/api.go index f7249efb0..c74f6ee7a 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1194,16 +1194,26 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { return apiFuncResult{nil, &apiError{errorBadData, errors.New("limit must be a number")}, nil, nil} } } + limitPerMetric := -1 + if s := r.FormValue("limit_per_metric"); s != "" { + var err error + if limitPerMetric, err = strconv.Atoi(s); err != nil { + return apiFuncResult{nil, &apiError{errorBadData, errors.New("limit_per_metric must be a number")}, nil, nil} + } + } metric := r.FormValue("metric") for _, tt := range api.targetRetriever(r.Context()).TargetsActive() { for _, t := range tt { - if metric == "" { for _, mm := range t.MetadataList() { m := metadata{Type: mm.Type, Help: mm.Help, Unit: mm.Unit} ms, ok := metrics[mm.Metric] + if limitPerMetric > 0 && len(ms) >= limitPerMetric { + continue + } + if !ok { ms = map[metadata]struct{}{} metrics[mm.Metric] = ms @@ -1217,6 +1227,10 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { m := metadata{Type: md.Type, Help: md.Help, Unit: md.Unit} ms, ok := metrics[md.Metric] + if limitPerMetric > 0 && len(ms) >= limitPerMetric { + continue + } + if !ok { ms = map[metadata]struct{}{} metrics[md.Metric] = ms diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index baee2189e..16e74071c 100644 
--- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -1023,15 +1023,16 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E } type test struct { - endpoint apiFunc - params map[string]string - query url.Values - response interface{} - responseLen int - errType errorType - sorter func(interface{}) - metadata []targetMetadata - exemplars []exemplar.QueryResult + endpoint apiFunc + params map[string]string + query url.Values + response interface{} + responseLen int + responseMetadataTotal int + errType errorType + sorter func(interface{}) + metadata []targetMetadata + exemplars []exemplar.QueryResult } tests := []test{ @@ -1776,6 +1777,126 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E }, responseLen: 2, }, + // With a limit for the number of metadata per metric. + { + endpoint: api.metricMetadata, + query: url.Values{"limit_per_metric": []string{"1"}}, + metadata: []targetMetadata{ + { + identifier: "test", + metadata: []scrape.MetricMetadata{ + { + Metric: "go_threads", + Type: textparse.MetricTypeGauge, + Help: "Number of OS threads created", + Unit: "", + }, + { + Metric: "go_threads", + Type: textparse.MetricTypeGauge, + Help: "Repeated metadata", + Unit: "", + }, + { + Metric: "go_gc_duration_seconds", + Type: textparse.MetricTypeSummary, + Help: "A summary of the GC invocation durations.", + Unit: "", + }, + }, + }, + }, + response: map[string][]metadata{ + "go_threads": { + {textparse.MetricTypeGauge, "Number of OS threads created", ""}, + }, + "go_gc_duration_seconds": { + {textparse.MetricTypeSummary, "A summary of the GC invocation durations.", ""}, + }, + }, + }, + // With a limit for the number of metadata per metric and per metric. 
+ { + endpoint: api.metricMetadata, + query: url.Values{"limit_per_metric": []string{"1"}, "limit": []string{"1"}}, + metadata: []targetMetadata{ + { + identifier: "test", + metadata: []scrape.MetricMetadata{ + { + Metric: "go_threads", + Type: textparse.MetricTypeGauge, + Help: "Number of OS threads created", + Unit: "", + }, + { + Metric: "go_threads", + Type: textparse.MetricTypeGauge, + Help: "Repeated metadata", + Unit: "", + }, + { + Metric: "go_gc_duration_seconds", + Type: textparse.MetricTypeSummary, + Help: "A summary of the GC invocation durations.", + Unit: "", + }, + }, + }, + }, + responseLen: 1, + responseMetadataTotal: 1, + }, + + // With a limit for the number of metadata per metric and per metric, while having multiple targets. + { + endpoint: api.metricMetadata, + query: url.Values{"limit_per_metric": []string{"1"}, "limit": []string{"1"}}, + metadata: []targetMetadata{ + { + identifier: "test", + metadata: []scrape.MetricMetadata{ + { + Metric: "go_threads", + Type: textparse.MetricTypeGauge, + Help: "Number of OS threads created", + Unit: "", + }, + { + Metric: "go_threads", + Type: textparse.MetricTypeGauge, + Help: "Repeated metadata", + Unit: "", + }, + { + Metric: "go_gc_duration_seconds", + Type: textparse.MetricTypeSummary, + Help: "A summary of the GC invocation durations.", + Unit: "", + }, + }, + }, + { + identifier: "secondTarget", + metadata: []scrape.MetricMetadata{ + { + Metric: "go_threads", + Type: textparse.MetricTypeGauge, + Help: "Number of OS threads created, but from a different target", + Unit: "", + }, + { + Metric: "go_gc_duration_seconds", + Type: textparse.MetricTypeSummary, + Help: "A summary of the GC invocation durations, but from a different target.", + Unit: "", + }, + }, + }, + }, + responseLen: 1, + responseMetadataTotal: 1, + }, // When requesting a specific metric that is present. 
{ endpoint: api.metricMetadata, @@ -2565,6 +2686,9 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E if test.responseLen != 0 { assertAPIResponseLength(t, res.data, test.responseLen) + if test.responseMetadataTotal != 0 { + assertAPIResponseMetadataLen(t, res.data, test.responseMetadataTotal) + } } else { assertAPIResponse(t, res.data, test.response) } @@ -2615,6 +2739,24 @@ func assertAPIResponseLength(t *testing.T, got interface{}, expLen int) { } } +func assertAPIResponseMetadataLen(t *testing.T, got interface{}, expLen int) { + t.Helper() + + var gotLen int + response := got.(map[string][]metadata) + for _, m := range response { + gotLen += len(m) + } + + if gotLen != expLen { + t.Fatalf( + "Amount of metadata in the response does not match, expected:\n%d\ngot:\n%d", + expLen, + gotLen, + ) + } +} + type fakeDB struct { err error } From 81bf3e63a4663e27a935bab0167f73b9da3afeb5 Mon Sep 17 00:00:00 2001 From: Jayapriya Pai Date: Mon, 12 Jun 2023 21:39:19 +0530 Subject: [PATCH 192/632] docs: update prometheus-operator link Signed-off-by: Jayapriya Pai --- docs/configuration/configuration.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 3a9ace2b6..4287272f6 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -2160,7 +2160,7 @@ attach_metadata: See [this example Prometheus configuration file](/documentation/examples/prometheus-kubernetes.yml) for a detailed example of configuring Prometheus for Kubernetes. -You may wish to check out the 3rd party [Prometheus Operator](https://github.com/coreos/prometheus-operator), +You may wish to check out the 3rd party [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator), which automates the Prometheus setup on top of Kubernetes. 
### `` From 06843db80a58bc679cf3c1863891abdd1c7cdeba Mon Sep 17 00:00:00 2001 From: Bartol Deak Date: Mon, 12 Jun 2023 22:56:44 +0200 Subject: [PATCH 193/632] Hide `which` stderr output Signed-off-by: Bartol Deak --- Makefile | 2 +- Makefile.common | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 3877ee719..2816c9801 100644 --- a/Makefile +++ b/Makefile @@ -82,7 +82,7 @@ assets-tarball: assets .PHONY: parser parser: @echo ">> running goyacc to generate the .go file." -ifeq (, $(shell which goyacc)) +ifeq (, $(shell which goyacc 2>/dev/null)) @echo "goyacc not installed so skipping" @echo "To install: go install golang.org/x/tools/cmd/goyacc@v0.6.0" else diff --git a/Makefile.common b/Makefile.common index b111d2562..2bae0efab 100644 --- a/Makefile.common +++ b/Makefile.common @@ -178,7 +178,7 @@ endif .PHONY: common-yamllint common-yamllint: @echo ">> running yamllint on all YAML files in the repository" -ifeq (, $(shell which yamllint)) +ifeq (, $(shell which yamllint 2>/dev/null)) @echo "yamllint not installed so skipping" else yamllint . From 6a18962cfac3445693c78b157976e31c2bb81f12 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Tue, 13 Jun 2023 10:38:00 +0200 Subject: [PATCH 194/632] mv labels_string.go labels_stringlabels.go (#12328) This is a minor cosmetical change, but my IDE (and I guess many of them) nests `labels_string.go` under `labels.go` because it assumes it's the file generated by the `stringer` tool, which follows that naming pattern. 
Signed-off-by: Oleg Zaytsev --- model/labels/{labels_string.go => labels_stringlabels.go} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename model/labels/{labels_string.go => labels_stringlabels.go} (100%) diff --git a/model/labels/labels_string.go b/model/labels/labels_stringlabels.go similarity index 100% rename from model/labels/labels_string.go rename to model/labels/labels_stringlabels.go From 95b7d592bae8542c58efa92bb67777a047add231 Mon Sep 17 00:00:00 2001 From: Bartol Deak Date: Tue, 13 Jun 2023 11:57:52 +0200 Subject: [PATCH 195/632] rewrite `which` with `command -v` Signed-off-by: Bartol Deak --- Makefile | 2 +- Makefile.common | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 2816c9801..0dd8673af 100644 --- a/Makefile +++ b/Makefile @@ -82,7 +82,7 @@ assets-tarball: assets .PHONY: parser parser: @echo ">> running goyacc to generate the .go file." -ifeq (, $(shell which goyacc 2>/dev/null)) +ifeq (, $(shell command -v goyacc > /dev/null)) @echo "goyacc not installed so skipping" @echo "To install: go install golang.org/x/tools/cmd/goyacc@v0.6.0" else diff --git a/Makefile.common b/Makefile.common index 2bae0efab..e372d3473 100644 --- a/Makefile.common +++ b/Makefile.common @@ -49,7 +49,7 @@ endif GOTEST := $(GO) test GOTEST_DIR := ifneq ($(CIRCLE_JOB),) -ifneq ($(shell which gotestsum),) +ifneq ($(shell command -v gotestsum > /dev/null),) GOTEST_DIR := test-results GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- endif @@ -178,7 +178,7 @@ endif .PHONY: common-yamllint common-yamllint: @echo ">> running yamllint on all YAML files in the repository" -ifeq (, $(shell which yamllint 2>/dev/null)) +ifeq (, $(shell command -v yamllint > /dev/null)) @echo "yamllint not installed so skipping" else yamllint . 
From 0941ea4afcc6df7d70fa9578bc9403c5fec8ea1c Mon Sep 17 00:00:00 2001 From: tyltr Date: Fri, 16 Jun 2023 11:09:19 +0800 Subject: [PATCH 196/632] typo Signed-off-by: tyltr --- cmd/prometheus/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index e05ac7957..3d723f152 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -429,7 +429,7 @@ func main() { _, err := a.Parse(os.Args[1:]) if err != nil { - fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing commandline arguments: %w", err)) + fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing command line arguments: %w", err)) a.Usage(os.Args[1:]) os.Exit(2) } From 0c6cf868287b47b01272caa1a73e21a07befc0a0 Mon Sep 17 00:00:00 2001 From: timmartin-stripe <131782471+timmartin-stripe@users.noreply.github.com> Date: Fri, 16 Jun 2023 06:55:41 -0400 Subject: [PATCH 197/632] Add sentence explaining what happens when the `for` clause is omitted (#12457) Just adding a statement here explaining that the default is an immediate move to "active" without a pending state. Signed-off-by: Tim Martin --- docs/configuration/alerting_rules.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/configuration/alerting_rules.md b/docs/configuration/alerting_rules.md index 74f6c02b1..3c1ec84f0 100644 --- a/docs/configuration/alerting_rules.md +++ b/docs/configuration/alerting_rules.md @@ -32,7 +32,11 @@ groups: ``` The optional `for` clause causes Prometheus to wait for a certain duration -between first encountering a new expression output vector element and counting an alert as firing for this element. In this case, Prometheus will check that the alert continues to be active during each evaluation for 10 minutes before firing the alert. Elements that are active, but not firing yet, are in the pending state. +between first encountering a new expression output vector element and counting +an alert as firing for this element. 
In this case, Prometheus will check that +the alert continues to be active during each evaluation for 10 minutes before +firing the alert. Elements that are active, but not firing yet, are in the pending state. +Alerting rules without the `for` clause will become active on the first evaluation. The `labels` clause allows specifying a set of additional labels to be attached to the alert. Any existing conflicting labels will be overwritten. The label From 0de855508c314ddfd50fb48851be384c61e65501 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Tue, 30 May 2023 15:22:24 +0200 Subject: [PATCH 198/632] Add support for inline TLS certificates Signed-off-by: Julien Pivotto --- docs/configuration/configuration.md | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 3a9ace2b6..db41c3247 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -427,11 +427,16 @@ Where `` must be unique across all scrape configurations. A `tls_config` allows configuring TLS connections. ```yaml -# CA certificate to validate API server certificate with. +# CA certificate to validate API server certificate with. At most one of ca and ca_file is allowed. +[ ca: ] [ ca_file: ] -# Certificate and key files for client cert authentication to the server. +# Certificate and key for client cert authentication to the server. +# At most one of cert and cert_file is allowed. +# At most one of key and key_file is allowed. +[ cert: ] [ cert_file: ] +[ key: ] [ key_file: ] # ServerName extension to indicate the name of the server. 
From 9b6355f02eee0df38a7c797a02fa5fa47242fd65 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 17 Jun 2023 08:18:31 +0000 Subject: [PATCH 199/632] build(deps): bump golang.org/x/oauth2 from 0.8.0 to 0.9.0 Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.8.0 to 0.9.0. - [Commits](https://github.com/golang/oauth2/compare/v0.8.0...v0.9.0) --- updated-dependencies: - dependency-name: golang.org/x/oauth2 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 12 ++++++------ go.sum | 24 ++++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index dd75cc333..7d3ed3ab8 100644 --- a/go.mod +++ b/go.mod @@ -64,10 +64,10 @@ require ( go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.2 go.uber.org/goleak v1.2.1 - golang.org/x/net v0.10.0 - golang.org/x/oauth2 v0.8.0 + golang.org/x/net v0.11.0 + golang.org/x/oauth2 v0.9.0 golang.org/x/sync v0.2.0 - golang.org/x/sys v0.8.0 + golang.org/x/sys v0.9.0 golang.org/x/time v0.3.0 golang.org/x/tools v0.9.3 google.golang.org/api v0.114.0 @@ -178,11 +178,11 @@ require ( go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect go.opentelemetry.io/otel/metric v1.16.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect - golang.org/x/crypto v0.8.0 // indirect + golang.org/x/crypto v0.10.0 // indirect golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 golang.org/x/mod v0.10.0 // indirect - golang.org/x/term v0.8.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/term v0.9.0 // indirect + golang.org/x/text v0.10.0 // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 774deb85b..f3583ccb1 100644 --- a/go.sum +++ b/go.sum @@ -844,8 +844,8 @@ golang.org/x/crypto 
v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= -golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -928,8 +928,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU= +golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -937,8 +937,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/oauth2 v0.9.0 h1:BPpt2kU7oMRq3kCHAA1tbSEshXRw1LpG2ztgDwrzuAs= +golang.org/x/oauth2 v0.9.0/go.mod h1:qYgFZaFiu6Wg24azG8bdV52QJXJGbZzIIsRCdVKzbLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1021,14 +1021,14 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28= +golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1039,8 +1039,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From f28d7d23cdd279f4530fcc66c8ff248aa5e57a56 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 17 Jun 2023 08:18:50 +0000 Subject: [PATCH 
200/632] build(deps): bump github.com/stretchr/testify Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.8.2 to 1.8.4. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.8.2...v1.8.4) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 2 +- documentation/examples/remote_storage/go.sum | 8 ++------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index c0d433196..2f2861947 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -11,7 +11,7 @@ require ( github.com/prometheus/client_golang v1.15.0 github.com/prometheus/common v0.42.0 github.com/prometheus/prometheus v0.44.0 - github.com/stretchr/testify v1.8.2 + github.com/stretchr/testify v1.8.4 ) require ( diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index e0eac05c1..c540d8984 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -211,16 +211,12 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify 
v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= From 6adb64a9669e50819ac0b344aea591df3e6fbc15 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 17 Jun 2023 08:19:02 +0000 Subject: [PATCH 201/632] build(deps): bump github.com/aws/aws-sdk-go from 1.44.276 to 1.44.284 Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.276 to 1.44.284. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.276...v1.44.284) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index dd75cc333..ea61b5215 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/Azure/go-autorest/autorest/adal v0.9.23 github.com/alecthomas/kingpin/v2 v2.3.2 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 - github.com/aws/aws-sdk-go v1.44.276 + github.com/aws/aws-sdk-go v1.44.284 github.com/cespare/xxhash/v2 v2.2.0 github.com/dennwc/varint v1.0.0 github.com/digitalocean/godo v1.99.0 diff --git a/go.sum b/go.sum index 774deb85b..bf091742a 100644 --- a/go.sum +++ b/go.sum @@ -106,8 +106,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.276 h1:ywPlx9C5Yc482dUgAZ9bHpQ6onVvJvYE9FJWsNDCEy0= -github.com/aws/aws-sdk-go v1.44.276/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.284 h1:Oc5Kubi43/VCkerlt3ZU3KpBju6BpNkoG3s7E8vj/O8= +github.com/aws/aws-sdk-go v1.44.284/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= From 3eaa7eb538a14bfbfbb1f6e11c4f0d5a4878b10e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Mon, 19 Jun 2023 10:44:24 +0200 Subject: [PATCH 202/632] fix: apply suggested changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François 
Gouteroux --- util/fmtutil/format.go | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/util/fmtutil/format.go b/util/fmtutil/format.go index 291308dc2..5cff0516b 100644 --- a/util/fmtutil/format.go +++ b/util/fmtutil/format.go @@ -44,8 +44,8 @@ var MetricMetadataTypeValue = map[string]int32{ "STATESET": 7, } -// FormatMetrics convert metric family to a writerequest. -func FormatMetrics(mf map[string]*dto.MetricFamily, extraLabels map[string]string) (*prompb.WriteRequest, error) { +// CreateWriteRequest convert metric family to a writerequest. +func CreateWriteRequest(mf map[string]*dto.MetricFamily, extraLabels map[string]string) (*prompb.WriteRequest, error) { wr := &prompb.WriteRequest{} // build metric list @@ -76,16 +76,16 @@ func FormatMetrics(mf map[string]*dto.MetricFamily, extraLabels map[string]strin return wr, nil } -func makeTimeserie(wr *prompb.WriteRequest, labels map[string]string, timestamp int64, value float64) { - var timeserie prompb.TimeSeries - timeserie.Labels = makeLabels(labels) - timeserie.Samples = []prompb.Sample{ +func toTimeseries(wr *prompb.WriteRequest, labels map[string]string, timestamp int64, value float64) { + var ts prompb.TimeSeries + ts.Labels = makeLabels(labels) + ts.Samples = []prompb.Sample{ { Timestamp: timestamp, Value: value, }, } - wr.Timeseries = append(wr.Timeseries, timeserie) + wr.Timeseries = append(wr.Timeseries, ts) } func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Metric) error { @@ -98,9 +98,9 @@ func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Me switch { case m.Gauge != nil: - makeTimeserie(wr, labels, timestamp, m.GetGauge().GetValue()) + toTimeseries(wr, labels, timestamp, m.GetGauge().GetValue()) case m.Counter != nil: - makeTimeserie(wr, labels, timestamp, m.GetCounter().GetValue()) + toTimeseries(wr, labels, timestamp, m.GetCounter().GetValue()) case m.Summary != nil: metricName := 
labels[model.MetricNameLabel] // Preserve metric name order with first quantile labels timeseries then sum suffix timeserie and finally count suffix timeserie @@ -112,15 +112,15 @@ func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Me for _, q := range m.GetSummary().Quantile { quantileLabels[model.QuantileLabel] = fmt.Sprint(q.GetQuantile()) - makeTimeserie(wr, quantileLabels, timestamp, q.GetValue()) + toTimeseries(wr, quantileLabels, timestamp, q.GetValue()) } // Overwrite label model.MetricNameLabel for count and sum metrics // Add Summary sum timeserie labels[model.MetricNameLabel] = metricName + sumStr - makeTimeserie(wr, labels, timestamp, m.GetSummary().GetSampleSum()) + toTimeseries(wr, labels, timestamp, m.GetSummary().GetSampleSum()) // Add Summary count timeserie labels[model.MetricNameLabel] = metricName + countStr - makeTimeserie(wr, labels, timestamp, float64(m.GetSummary().GetSampleCount())) + toTimeseries(wr, labels, timestamp, float64(m.GetSummary().GetSampleCount())) case m.Histogram != nil: metricName := labels[model.MetricNameLabel] @@ -133,18 +133,18 @@ func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Me for _, b := range m.GetHistogram().Bucket { bucketLabels[model.MetricNameLabel] = metricName + bucketStr bucketLabels[model.BucketLabel] = fmt.Sprint(b.GetUpperBound()) - makeTimeserie(wr, bucketLabels, timestamp, float64(b.GetCumulativeCount())) + toTimeseries(wr, bucketLabels, timestamp, float64(b.GetCumulativeCount())) } // Overwrite label model.MetricNameLabel for count and sum metrics // Add Histogram sum timeserie labels[model.MetricNameLabel] = metricName + sumStr - makeTimeserie(wr, labels, timestamp, m.GetHistogram().GetSampleSum()) + toTimeseries(wr, labels, timestamp, m.GetHistogram().GetSampleSum()) // Add Histogram count timeserie labels[model.MetricNameLabel] = metricName + countStr - makeTimeserie(wr, labels, timestamp, float64(m.GetHistogram().GetSampleCount())) + 
toTimeseries(wr, labels, timestamp, float64(m.GetHistogram().GetSampleCount())) case m.Untyped != nil: - makeTimeserie(wr, labels, timestamp, m.GetUntyped().GetValue()) + toTimeseries(wr, labels, timestamp, m.GetUntyped().GetValue()) default: err = errors.New("unsupported metric type") } @@ -208,5 +208,5 @@ func ParseMetricsTextAndFormat(input io.Reader, labels map[string]string) (*prom if err != nil { return nil, err } - return FormatMetrics(mf, labels) + return CreateWriteRequest(mf, labels) } From 8bc2a19469119cccf51dd067efdb413e1150ca46 Mon Sep 17 00:00:00 2001 From: Matthias Loibl Date: Mon, 19 Jun 2023 17:40:15 +0200 Subject: [PATCH 203/632] web: Initialize requestCounter metrics to 0 with handler and 200k labels. Signed-off-by: Matthias Loibl --- web/web.go | 1 + 1 file changed, 1 insertion(+) diff --git a/web/web.go b/web/web.go index b9af2819b..ddb5430ed 100644 --- a/web/web.go +++ b/web/web.go @@ -158,6 +158,7 @@ func (m *metrics) instrumentHandlerWithPrefix(prefix string) func(handlerName st } func (m *metrics) instrumentHandler(handlerName string, handler http.HandlerFunc) http.HandlerFunc { + m.requestCounter.WithLabelValues(handlerName, "200").Add(0) return promhttp.InstrumentHandlerCounter( m.requestCounter.MustCurryWith(prometheus.Labels{"handler": handlerName}), promhttp.InstrumentHandlerDuration( From d19fa62476ada4ec6eb4399c93d0e11cca2aece9 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Tue, 20 Jun 2023 13:39:32 +0200 Subject: [PATCH 204/632] Revert "Improving Performance on the API Gzip Handler (#12363)" This reverts commit dfae954dc1137568f33564e8cffda321f2867925. 
Signed-off-by: Julien Pivotto --- go.mod | 1 - go.sum | 2 - util/httputil/compression.go | 74 +++++++++++-------- util/httputil/compression_test.go | 115 +++--------------------------- 4 files changed, 52 insertions(+), 140 deletions(-) diff --git a/go.mod b/go.mod index dd75cc333..9e4ab7ce9 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,6 @@ require ( github.com/hetznercloud/hcloud-go v1.45.1 github.com/ionos-cloud/sdk-go/v6 v6.1.7 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.16.5 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b github.com/linode/linodego v1.17.0 github.com/miekg/dns v1.1.54 diff --git a/go.sum b/go.sum index 774deb85b..897e6acff 100644 --- a/go.sum +++ b/go.sum @@ -507,8 +507,6 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= -github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= diff --git a/util/httputil/compression.go b/util/httputil/compression.go index 5e9276958..b96c088cb 100644 --- a/util/httputil/compression.go +++ b/util/httputil/compression.go @@ -14,11 +14,11 @@ package httputil import ( + "compress/gzip" + "compress/zlib" + "io" "net/http" "strings" - - "github.com/klauspost/compress/gzhttp" - "github.com/klauspost/compress/zlib" ) const ( @@ -28,27 
+28,53 @@ const ( deflateEncoding = "deflate" ) -// Wrapper around http.ResponseWriter which adds deflate compression -type deflatedResponseWriter struct { +// Wrapper around http.Handler which adds suitable response compression based +// on the client's Accept-Encoding headers. +type compressedResponseWriter struct { http.ResponseWriter - writer *zlib.Writer + writer io.Writer } // Writes HTTP response content data. -func (c *deflatedResponseWriter) Write(p []byte) (int, error) { +func (c *compressedResponseWriter) Write(p []byte) (int, error) { return c.writer.Write(p) } -// Close Closes the deflatedResponseWriter and ensures to flush all data before. -func (c *deflatedResponseWriter) Close() { - c.writer.Close() +// Closes the compressedResponseWriter and ensures to flush all data before. +func (c *compressedResponseWriter) Close() { + if zlibWriter, ok := c.writer.(*zlib.Writer); ok { + zlibWriter.Flush() + } + if gzipWriter, ok := c.writer.(*gzip.Writer); ok { + gzipWriter.Flush() + } + if closer, ok := c.writer.(io.Closer); ok { + defer closer.Close() + } } -// Constructs a new deflatedResponseWriter to compress the original writer using 'deflate' compression. -func newDeflateResponseWriter(writer http.ResponseWriter) *deflatedResponseWriter { - return &deflatedResponseWriter{ +// Constructs a new compressedResponseWriter based on client request headers. 
+func newCompressedResponseWriter(writer http.ResponseWriter, req *http.Request) *compressedResponseWriter { + encodings := strings.Split(req.Header.Get(acceptEncodingHeader), ",") + for _, encoding := range encodings { + switch strings.TrimSpace(encoding) { + case gzipEncoding: + writer.Header().Set(contentEncodingHeader, gzipEncoding) + return &compressedResponseWriter{ + ResponseWriter: writer, + writer: gzip.NewWriter(writer), + } + case deflateEncoding: + writer.Header().Set(contentEncodingHeader, deflateEncoding) + return &compressedResponseWriter{ + ResponseWriter: writer, + writer: zlib.NewWriter(writer), + } + } + } + return &compressedResponseWriter{ ResponseWriter: writer, - writer: zlib.NewWriter(writer), + writer: writer, } } @@ -60,21 +86,7 @@ type CompressionHandler struct { // ServeHTTP adds compression to the original http.Handler's ServeHTTP() method. func (c CompressionHandler) ServeHTTP(writer http.ResponseWriter, req *http.Request) { - encodings := strings.Split(req.Header.Get(acceptEncodingHeader), ",") - for _, encoding := range encodings { - switch strings.TrimSpace(encoding) { - case gzipEncoding: - gzhttp.GzipHandler(c.Handler).ServeHTTP(writer, req) - return - case deflateEncoding: - compWriter := newDeflateResponseWriter(writer) - writer.Header().Set(contentEncodingHeader, deflateEncoding) - c.Handler.ServeHTTP(compWriter, req) - compWriter.Close() - return - default: - c.Handler.ServeHTTP(writer, req) - return - } - } + compWriter := newCompressedResponseWriter(writer, req) + c.Handler.ServeHTTP(compWriter, req) + compWriter.Close() } diff --git a/util/httputil/compression_test.go b/util/httputil/compression_test.go index b7148fc1c..851279761 100644 --- a/util/httputil/compression_test.go +++ b/util/httputil/compression_test.go @@ -17,30 +17,23 @@ import ( "bytes" "compress/gzip" "compress/zlib" - "encoding/json" - "fmt" "io" "net/http" "net/http/httptest" - "strings" "testing" "github.com/stretchr/testify/require" - - 
"github.com/prometheus/prometheus/model/labels" ) var ( - mux *http.ServeMux - server *httptest.Server - respBody = strings.Repeat("Hello World!", 500) + mux *http.ServeMux + server *httptest.Server ) func setup() func() { mux = http.NewServeMux() server = httptest.NewServer(mux) return func() { - server.CloseClientConnections() server.Close() } } @@ -48,7 +41,7 @@ func setup() func() { func getCompressionHandlerFunc() CompressionHandler { hf := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(respBody)) + w.Write([]byte("Hello World!")) } return CompressionHandler{ Handler: http.HandlerFunc(hf), @@ -74,8 +67,9 @@ func TestCompressionHandler_PlainText(t *testing.T) { contents, err := io.ReadAll(resp.Body) require.NoError(t, err, "unexpected error while creating the response body reader") + expected := "Hello World!" actual := string(contents) - require.Equal(t, respBody, actual, "expected response with content") + require.Equal(t, expected, actual, "expected response with content") } func TestCompressionHandler_Gzip(t *testing.T) { @@ -109,7 +103,8 @@ func TestCompressionHandler_Gzip(t *testing.T) { require.NoError(t, err, "unexpected error while reading the response body") actual := buf.String() - require.Equal(t, respBody, actual, "unexpected response content") + expected := "Hello World!" 
+ require.Equal(t, expected, actual, "unexpected response content") } func TestCompressionHandler_Deflate(t *testing.T) { @@ -143,98 +138,6 @@ func TestCompressionHandler_Deflate(t *testing.T) { require.NoError(t, err, "unexpected error while reading the response body") actual := buf.String() - require.Equal(t, respBody, actual, "expected response with content") -} - -func Benchmark_compression(b *testing.B) { - client := &http.Client{ - Transport: &http.Transport{ - DisableCompression: true, - }, - } - - cases := map[string]struct { - enc string - numberOfLabels int - }{ - "gzip-10-labels": { - enc: gzipEncoding, - numberOfLabels: 10, - }, - "gzip-100-labels": { - enc: gzipEncoding, - numberOfLabels: 100, - }, - "gzip-1K-labels": { - enc: gzipEncoding, - numberOfLabels: 1000, - }, - "gzip-10K-labels": { - enc: gzipEncoding, - numberOfLabels: 10000, - }, - "gzip-100K-labels": { - enc: gzipEncoding, - numberOfLabels: 100000, - }, - "gzip-1M-labels": { - enc: gzipEncoding, - numberOfLabels: 1000000, - }, - } - - for name, tc := range cases { - b.Run(name, func(b *testing.B) { - tearDown := setup() - defer tearDown() - labels := labels.ScratchBuilder{} - - for i := 0; i < tc.numberOfLabels; i++ { - labels.Add(fmt.Sprintf("Name%v", i), fmt.Sprintf("Value%v", i)) - } - - respBody, err := json.Marshal(labels.Labels()) - require.NoError(b, err) - - hf := func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - w.Write(respBody) - } - h := CompressionHandler{ - Handler: http.HandlerFunc(hf), - } - - mux.Handle("/foo_endpoint", h) - - req, _ := http.NewRequest("GET", server.URL+"/foo_endpoint", nil) - req.Header.Set(acceptEncodingHeader, tc.enc) - - b.ReportAllocs() - b.ResetTimer() - - // Reusing the array to read the body and avoid allocation on the test - encRespBody := make([]byte, len(respBody)) - - for i := 0; i < b.N; i++ { - resp, err := client.Do(req) - - require.NoError(b, err) - - require.NoError(b, err, "client get failed with unexpected 
error") - responseBodySize := 0 - for { - n, err := resp.Body.Read(encRespBody) - responseBodySize += n - if err == io.EOF { - break - } - } - - b.ReportMetric(float64(responseBodySize), "ContentLength") - resp.Body.Close() - } - - client.CloseIdleConnections() - }) - } + expected := "Hello World!" + require.Equal(t, expected, actual, "expected response with content") } From c858049744a8cdf684a54cf67b8d16a41d69d81d Mon Sep 17 00:00:00 2001 From: Jesus Vazquez Date: Tue, 20 Jun 2023 17:13:02 +0200 Subject: [PATCH 205/632] Create 2.45.0-rc.1 (#12478) Signed-off-by: Jesus Vazquez --- CHANGELOG.md | 5 +++++ VERSION | 2 +- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/react-app/package.json | 4 ++-- 6 files changed, 18 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3b4ee3fc1..e4b61b13f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,10 @@ # Changelog + +## 2.45.0-rc.1 / 2023-06-20 + +* [ENHANCEMENT] Reverts previous enhancement to the API Gzip Handler due to higher cpu and memory usage. 
#12363 + ## 2.45.0-rc.0 / 2023-06-05 This release is a LTS (Long-Term Support) release of Prometheus and will diff --git a/VERSION b/VERSION index ae2ba732a..362f077e4 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.45.0-rc.0 +2.45.0-rc.1 diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 6c404a6c4..f39187f9e 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.45.0-rc.0", + "version": "0.45.0-rc.1", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.45.0-rc.0", + "@prometheus-io/lezer-promql": "0.45.0-rc.1", "lru-cache": "^6.0.0" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 389eb88f4..1b6a1cebd 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.45.0-rc.0", + "version": "0.45.0-rc.1", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index f80d2fd6e..a773df147 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -28,10 +28,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.45.0-rc.0", + "version": "0.45.0-rc.1", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.45.0-rc.0", + "@prometheus-io/lezer-promql": "0.45.0-rc.1", "lru-cache": "^6.0.0" }, "devDependencies": { @@ -61,7 +61,7 @@ }, "module/lezer-promql": { "name": 
"@prometheus-io/lezer-promql", - "version": "0.45.0-rc.0", + "version": "0.45.0-rc.1", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.2.3", @@ -20765,7 +20765,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.45.0-rc.0", + "version": "0.45.0-rc.1", "dependencies": { "@codemirror/autocomplete": "^6.7.1", "@codemirror/commands": "^6.2.4", @@ -20783,7 +20783,7 @@ "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.45.0-rc.0", + "@prometheus-io/codemirror-promql": "0.45.0-rc.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.0", @@ -23423,7 +23423,7 @@ "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.45.0-rc.0", + "@prometheus-io/codemirror-promql": "0.45.0-rc.1", "@testing-library/react-hooks": "^7.0.2", "@types/enzyme": "^3.10.13", "@types/flot": "0.0.32", @@ -23487,7 +23487,7 @@ "@lezer/common": "^1.0.3", "@lezer/highlight": "^1.1.6", "@lezer/lr": "^1.3.6", - "@prometheus-io/lezer-promql": "0.45.0-rc.0", + "@prometheus-io/lezer-promql": "0.45.0-rc.1", "@types/lru-cache": "^5.1.1", "isomorphic-fetch": "^3.0.0", "lru-cache": "^6.0.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index d8efd5681..6f75d6503 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.45.0-rc.0", + "version": "0.45.0-rc.1", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.7.1", @@ -19,7 +19,7 @@ "@lezer/common": "^1.0.3", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.45.0-rc.0", + "@prometheus-io/codemirror-promql": "0.45.0-rc.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.0", From 87d08abe110406d27870d50ed4e1cfa5f8f12206 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 20 
Jun 2023 20:58:47 +0100 Subject: [PATCH 206/632] labels: faster Compare function when using -tags stringlabels (#12451) Instead of unpacking every individual string, we skip to the point where there is a difference, going 8 bytes at a time where possible. Add benchmark for Compare; extend tests too. --------- Signed-off-by: Bryan Boreham Co-authored-by: Oleg Zaytsev --- model/labels/labels_stringlabels.go | 64 ++++++++++++++---------- model/labels/labels_test.go | 76 +++++++++++++++++++++-------- 2 files changed, 94 insertions(+), 46 deletions(-) diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go index e232ca7a4..eb98fd6fb 100644 --- a/model/labels/labels_stringlabels.go +++ b/model/labels/labels_stringlabels.go @@ -422,37 +422,49 @@ func FromStrings(ss ...string) Labels { // Compare compares the two label sets. // The result will be 0 if a==b, <0 if a < b, and >0 if a > b. -// TODO: replace with Less function - Compare is never needed. -// TODO: just compare the underlying strings when we don't need alphanumeric sorting. func Compare(a, b Labels) int { - l := len(a.data) - if len(b.data) < l { - l = len(b.data) + // Find the first byte in the string where a and b differ. + shorter, longer := a.data, b.data + if len(b.data) < len(a.data) { + shorter, longer = b.data, a.data + } + i := 0 + // First, go 8 bytes at a time. Data strings are expected to be 8-byte aligned. + sp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&shorter)).Data) + lp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&longer)).Data) + for ; i < len(shorter)-8; i += 8 { + if *(*uint64)(unsafe.Add(sp, i)) != *(*uint64)(unsafe.Add(lp, i)) { + break + } + } + // Now go 1 byte at a time. + for ; i < len(shorter); i++ { + if shorter[i] != longer[i] { + break + } + } + if i == len(shorter) { + // One Labels was a prefix of the other; the set with fewer labels compares lower. 
+ return len(a.data) - len(b.data) } - ia, ib := 0, 0 - for ia < l { - var aName, bName string - aName, ia = decodeString(a.data, ia) - bName, ib = decodeString(b.data, ib) - if aName != bName { - if aName < bName { - return -1 - } - return 1 - } - var aValue, bValue string - aValue, ia = decodeString(a.data, ia) - bValue, ib = decodeString(b.data, ib) - if aValue != bValue { - if aValue < bValue { - return -1 - } - return 1 + // Now we know that there is some difference before the end of a and b. + // Go back through the fields and find which field that difference is in. + firstCharDifferent := i + for i = 0; ; { + size, nextI := decodeSize(a.data, i) + if nextI+size > firstCharDifferent { + break } + i = nextI + size } - // If all labels so far were in common, the set with fewer labels comes first. - return len(a.data) - len(b.data) + // Difference is inside this entry. + aStr, _ := decodeString(a.data, i) + bStr, _ := decodeString(b.data, i) + if aStr < bStr { + return -1 + } + return +1 } // Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. 
diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index 108d8b0de..f0f05734b 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -361,6 +361,18 @@ func TestLabels_Compare(t *testing.T) { "bbc", "222"), expected: -1, }, + { + compared: FromStrings( + "aaa", "111", + "bb", "222"), + expected: 1, + }, + { + compared: FromStrings( + "aaa", "111", + "bbbb", "222"), + expected: -1, + }, { compared: FromStrings( "aaa", "111"), @@ -380,6 +392,10 @@ func TestLabels_Compare(t *testing.T) { "bbb", "222"), expected: 0, }, + { + compared: EmptyLabels(), + expected: 1, + }, } sign := func(a int) int { @@ -395,6 +411,8 @@ func TestLabels_Compare(t *testing.T) { for i, test := range tests { got := Compare(labels, test.compared) require.Equal(t, sign(test.expected), sign(got), "unexpected comparison result for test case %d", i) + got = Compare(test.compared, labels) + require.Equal(t, -sign(test.expected), sign(got), "unexpected comparison result for reverse test case %d", i) } } @@ -468,27 +486,34 @@ func BenchmarkLabels_Get(b *testing.B) { } } +var comparisonBenchmarkScenarios = []struct { + desc string + base, other Labels +}{ + { + "equal", + FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + }, + { + "not equal", + FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + FromStrings("a_label_name", "a_label_value", "another_label_name", "a_different_label_value"), + }, + { + "different sizes", + FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + FromStrings("a_label_name", "a_label_value"), + }, + { + "lots", + FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrz"), + FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", 
"iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrr"), + }, +} + func BenchmarkLabels_Equals(b *testing.B) { - for _, scenario := range []struct { - desc string - base, other Labels - }{ - { - "equal", - FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), - FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), - }, - { - "not equal", - FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), - FromStrings("a_label_name", "a_label_value", "another_label_name", "a_different_label_value"), - }, - { - "different sizes", - FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), - FromStrings("a_label_name", "a_label_value"), - }, - } { + for _, scenario := range comparisonBenchmarkScenarios { b.Run(scenario.desc, func(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { @@ -498,6 +523,17 @@ func BenchmarkLabels_Equals(b *testing.B) { } } +func BenchmarkLabels_Compare(b *testing.B) { + for _, scenario := range comparisonBenchmarkScenarios { + b.Run(scenario.desc, func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = Compare(scenario.base, scenario.other) + } + }) + } +} + func TestLabels_Copy(t *testing.T) { require.Equal(t, FromStrings("aaa", "111", "bbb", "222"), FromStrings("aaa", "111", "bbb", "222").Copy()) } From be0b82cf2ffbe5558eaff537b23c0eaed152c76d Mon Sep 17 00:00:00 2001 From: Jesus Vazquez Date: Tue, 20 Jun 2023 22:20:01 +0200 Subject: [PATCH 207/632] Fix PR typo in 2.45.0-rc.1 CHANGELOG (#12479) Signed-off-by: Jesus Vazquez --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e4b61b13f..946073e61 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,7 +3,7 @@ ## 2.45.0-rc.1 / 2023-06-20 -* [ENHANCEMENT] Reverts previous enhancement to the API Gzip Handler due to higher cpu and memory usage. 
#12363 +* [ENHANCEMENT] Reverts previous enhancement to the API Gzip Handler due to higher cpu and memory usage. #12476 ## 2.45.0-rc.0 / 2023-06-05 From 8ef767e396bf8445f009f945b0162fd71827f445 Mon Sep 17 00:00:00 2001 From: Jesus Vazquez Date: Fri, 23 Jun 2023 17:01:52 +0200 Subject: [PATCH 208/632] Release 2.45.0 (#12486) Signed-off-by: Jesus Vazquez --- CHANGELOG.md | 7 +------ VERSION | 2 +- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/react-app/package.json | 4 ++-- 6 files changed, 14 insertions(+), 19 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 946073e61..76b6e51b5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,11 +1,7 @@ # Changelog -## 2.45.0-rc.1 / 2023-06-20 - -* [ENHANCEMENT] Reverts previous enhancement to the API Gzip Handler due to higher cpu and memory usage. #12476 - -## 2.45.0-rc.0 / 2023-06-05 +## 2.45.0 / 2023-06-23 This release is a LTS (Long-Term Support) release of Prometheus and will receive security, documentation and bugfix patches for at least 12 months. @@ -20,7 +16,6 @@ Please read more about our LTS release cycle at * [FEATURE] Promtool: When providing the block id, only one block will be loaded and analyzed. #12031 * [FEATURE] Remote-write: New Azure ad configuration to support remote writing directly to Azure Monitor workspace. #11944 * [FEATURE] TSDB: Samples per chunk are now configurable with flag `storage.tsdb.samples-per-chunk`. By default set to its former value 120. #12055 -* [ENHANCEMENT] API: Improving Performance on the API Gzip Handler. #12363 * [ENHANCEMENT] Native histograms: bucket size can now be limited to avoid scrape fails. #12254 * [ENHANCEMENT] TSDB: Dropped series are now deleted from the WAL sooner. #12297 * [BUGFIX] Native histograms: ChunkSeries iterator now checks if a new sample can be appended to the open chunk. 
#12185 diff --git a/VERSION b/VERSION index 362f077e4..e599014ea 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.45.0-rc.1 +2.45.0 diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index f39187f9e..c7fafb133 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.45.0-rc.1", + "version": "0.45.0", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.45.0-rc.1", + "@prometheus-io/lezer-promql": "0.45.0", "lru-cache": "^6.0.0" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 1b6a1cebd..847a9610e 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.45.0-rc.1", + "version": "0.45.0", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index a773df147..ac365d0d0 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -28,10 +28,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.45.0-rc.1", + "version": "0.45.0", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.45.0-rc.1", + "@prometheus-io/lezer-promql": "0.45.0", "lru-cache": "^6.0.0" }, "devDependencies": { @@ -61,7 +61,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.45.0-rc.1", + "version": "0.45.0", "license": "Apache-2.0", "devDependencies": { 
"@lezer/generator": "^1.2.3", @@ -20765,7 +20765,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.45.0-rc.1", + "version": "0.45.0", "dependencies": { "@codemirror/autocomplete": "^6.7.1", "@codemirror/commands": "^6.2.4", @@ -20783,7 +20783,7 @@ "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.45.0-rc.1", + "@prometheus-io/codemirror-promql": "0.45.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.0", @@ -23423,7 +23423,7 @@ "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.45.0-rc.1", + "@prometheus-io/codemirror-promql": "0.45.0", "@testing-library/react-hooks": "^7.0.2", "@types/enzyme": "^3.10.13", "@types/flot": "0.0.32", @@ -23487,7 +23487,7 @@ "@lezer/common": "^1.0.3", "@lezer/highlight": "^1.1.6", "@lezer/lr": "^1.3.6", - "@prometheus-io/lezer-promql": "0.45.0-rc.1", + "@prometheus-io/lezer-promql": "0.45.0", "@types/lru-cache": "^5.1.1", "isomorphic-fetch": "^3.0.0", "lru-cache": "^6.0.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 6f75d6503..81eba150a 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.45.0-rc.1", + "version": "0.45.0", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.7.1", @@ -19,7 +19,7 @@ "@lezer/common": "^1.0.3", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.45.0-rc.1", + "@prometheus-io/codemirror-promql": "0.45.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.0", From 02277bbe0dd2d89fd3fe5f508399d54da9c61827 Mon Sep 17 00:00:00 2001 From: Matt Harbison Date: Fri, 23 Jun 2023 16:37:04 -0400 Subject: [PATCH 209/632] Fix path handling in File-SD watcher to allow directory monitoring on Windows Previously, `d.paths` were normalized 
to backslashes on Windows, even when if the config file used Unix style. The end result meant always watching `./`, so changes for this config were always ignored: scrape_configs: - job_name: 'envmsc1' file_sd_configs: - files: - 'targets/envmsc1.d/*.yml' - 'targets/envmsc1.d/*.yaml' Additionally, unlike the other platforms, no warning was emitted on startup about not being able to install the watch if the directory didn't exist. Now it is logged. Signed-off-by: Matt Harbison --- discovery/file/file.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/discovery/file/file.go b/discovery/file/file.go index c45595c6d..60b63350f 100644 --- a/discovery/file/file.go +++ b/discovery/file/file.go @@ -226,8 +226,8 @@ func (d *Discovery) watchFiles() { panic("no watcher configured") } for _, p := range d.paths { - if idx := strings.LastIndex(p, "/"); idx > -1 { - p = p[:idx] + if dir, _ := filepath.Split(p); dir != "" { + p = dir } else { p = "./" } From e1115ae58d069b3f7fd19ffc6b635a6c98882148 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 26 Jun 2023 18:35:22 +0100 Subject: [PATCH 210/632] labels: improve Get method for stringlabels build (#12485) Inline one call to `decodeString`, and skip decoding the value string until we find a match for the name. Do a quick check on the first character in each string, and exit early if we've gone past - labels are sorted in order. Also improve tests and benchmark: * labels: test Get with varying lengths - it's not typical for Prometheus labels to all be the same length. 
* extend benchmark with label not found --------- Signed-off-by: Bryan Boreham --- model/labels/labels_stringlabels.go | 24 +++++++++++++++++++----- model/labels/labels_test.go | 6 ++++-- 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go index eb98fd6fb..223aa6ebf 100644 --- a/model/labels/labels_stringlabels.go +++ b/model/labels/labels_stringlabels.go @@ -273,13 +273,27 @@ func (ls Labels) Copy() Labels { // Get returns the value for the label with the given name. // Returns an empty string if the label doesn't exist. func (ls Labels) Get(name string) string { + if name == "" { // Avoid crash in loop if someone asks for "". + return "" // Prometheus does not store blank label names. + } for i := 0; i < len(ls.data); { - var lName, lValue string - lName, i = decodeString(ls.data, i) - lValue, i = decodeString(ls.data, i) - if lName == name { - return lValue + var size int + size, i = decodeSize(ls.data, i) + if ls.data[i] == name[0] { + lName := ls.data[i : i+size] + i += size + if lName == name { + lValue, _ := decodeString(ls.data, i) + return lValue + } + } else { + if ls.data[i] > name[0] { // Stop looking if we've gone past. 
+ break + } + i += size } + size, i = decodeSize(ls.data, i) + i += size } return "" } diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index f0f05734b..d91be27cb 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -443,7 +443,8 @@ func TestLabels_Has(t *testing.T) { func TestLabels_Get(t *testing.T) { require.Equal(t, "", FromStrings("aaa", "111", "bbb", "222").Get("foo")) - require.Equal(t, "111", FromStrings("aaa", "111", "bbb", "222").Get("aaa")) + require.Equal(t, "111", FromStrings("aaaa", "111", "bbb", "222").Get("aaaa")) + require.Equal(t, "222", FromStrings("aaaa", "111", "bbb", "222").Get("bbb")) } // BenchmarkLabels_Get was written to check whether a binary search can improve the performance vs the linear search implementation @@ -463,7 +464,7 @@ func BenchmarkLabels_Get(b *testing.B) { maxLabels := 30 allLabels := make([]Label, maxLabels) for i := 0; i < maxLabels; i++ { - allLabels[i] = Label{Name: strings.Repeat(string('a'+byte(i)), 5)} + allLabels[i] = Label{Name: strings.Repeat(string('a'+byte(i)), 5+(i%5))} } for _, size := range []int{5, 10, maxLabels} { b.Run(fmt.Sprintf("with %d labels", size), func(b *testing.B) { @@ -474,6 +475,7 @@ func BenchmarkLabels_Get(b *testing.B) { {"get first label", allLabels[0].Name}, {"get middle label", allLabels[size/2].Name}, {"get last label", allLabels[size-1].Name}, + {"get not-found label", "benchmark"}, } { b.Run(scenario.desc, func(b *testing.B) { b.ResetTimer() From d78661ba108f7771e196d69dea00711d51a57893 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Tue, 27 Jun 2023 08:58:52 +0200 Subject: [PATCH 211/632] Stepping up as 2.46 release shepherd (#12494) Signed-off-by: Julien Pivotto --- RELEASE.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index f5c907fe9..0d0918191 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -50,7 +50,9 @@ Release cadence of first pre-releases being cut is 6 weeks. 
| v2.43 | 2023-03-08 | Julien Pivotto (GitHub: @roidelapluie) | | v2.44 | 2023-04-19 | Bryan Boreham (GitHub: @bboreham) | | v2.45 LTS | 2023-05-31 | Jesus Vazquez (Github: @jesusvazquez) | -| v2.46 | 2023-07-12 | **searching for volunteer** | +| v2.46 | 2023-07-12 | Julien Pivotto (GitHub: @roidelapluie) | +| v2.47 | 2023-08-23 | **searching for volunteer** | +| v2.48 | 2023-10-04 | **searching for volunteer** | If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice. From 58d38c4c5630fb73afb5403d03ede884ad7fd91c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Gouteroux?= Date: Tue, 27 Jun 2023 09:30:39 +0200 Subject: [PATCH 212/632] fix: apply suggested changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: François Gouteroux --- cmd/promtool/metrics.go | 2 +- util/fmtutil/format.go | 33 ++++++++++++--------------------- util/fmtutil/format_test.go | 10 +++++----- 3 files changed, 18 insertions(+), 27 deletions(-) diff --git a/cmd/promtool/metrics.go b/cmd/promtool/metrics.go index 4a6fafd40..2bc2237e2 100644 --- a/cmd/promtool/metrics.go +++ b/cmd/promtool/metrics.go @@ -102,7 +102,7 @@ func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[strin } func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]string) bool { - metricsData, err := fmtutil.ParseMetricsTextAndFormat(bytes.NewReader(data), labels) + metricsData, err := fmtutil.MetricTextToWriteRequest(bytes.NewReader(data), labels) if err != nil { fmt.Fprintln(os.Stderr, " FAILED:", err) return false diff --git a/util/fmtutil/format.go b/util/fmtutil/format.go index 5cff0516b..9034a90fa 100644 --- a/util/fmtutil/format.go +++ b/util/fmtutil/format.go @@ -44,8 +44,18 @@ var MetricMetadataTypeValue = map[string]int32{ "STATESET": 7, } -// 
CreateWriteRequest convert metric family to a writerequest. -func CreateWriteRequest(mf map[string]*dto.MetricFamily, extraLabels map[string]string) (*prompb.WriteRequest, error) { +// MetricTextToWriteRequest consumes an io.Reader and return the data in write request format. +func MetricTextToWriteRequest(input io.Reader, labels map[string]string) (*prompb.WriteRequest, error) { + var parser expfmt.TextParser + mf, err := parser.TextToMetricFamilies(input) + if err != nil { + return nil, err + } + return MetricFamiliesToWriteRequest(mf, labels) +} + +// MetricFamiliesToWriteRequest convert metric family to a writerequest. +func MetricFamiliesToWriteRequest(mf map[string]*dto.MetricFamily, extraLabels map[string]string) (*prompb.WriteRequest, error) { wr := &prompb.WriteRequest{} // build metric list @@ -191,22 +201,3 @@ func makeLabelsMap(m *dto.Metric, metricName string, extraLabels map[string]stri return labels } - -// ParseMetricsTextReader consumes an io.Reader and returns the MetricFamily. -func ParseMetricsTextReader(input io.Reader) (map[string]*dto.MetricFamily, error) { - var parser expfmt.TextParser - mf, err := parser.TextToMetricFamilies(input) - if err != nil { - return nil, err - } - return mf, nil -} - -// ParseMetricsTextAndFormat return the data in the expected prometheus metrics write request format. 
-func ParseMetricsTextAndFormat(input io.Reader, labels map[string]string) (*prompb.WriteRequest, error) { - mf, err := ParseMetricsTextReader(input) - if err != nil { - return nil, err - } - return CreateWriteRequest(mf, labels) -} diff --git a/util/fmtutil/format_test.go b/util/fmtutil/format_test.go index 5c1ab5bde..0f052f5e7 100644 --- a/util/fmtutil/format_test.go +++ b/util/fmtutil/format_test.go @@ -201,13 +201,13 @@ func TestParseAndPushMetricsTextAndFormat(t *testing.T) { `)) labels := map[string]string{"job": "promtool"} - expected, err := ParseMetricsTextAndFormat(input, labels) + expected, err := MetricTextToWriteRequest(input, labels) require.NoError(t, err) require.Equal(t, writeRequestFixture, expected) } -func TestParseMetricsTextAndFormatErrorParsingFloatValue(t *testing.T) { +func TestMetricTextToWriteRequestErrorParsingFloatValue(t *testing.T) { input := bytes.NewReader([]byte(` # HELP http_requests_total The total number of HTTP requests. # TYPE http_requests_total counter @@ -216,11 +216,11 @@ func TestParseMetricsTextAndFormatErrorParsingFloatValue(t *testing.T) { `)) labels := map[string]string{"job": "promtool"} - _, err := ParseMetricsTextAndFormat(input, labels) + _, err := MetricTextToWriteRequest(input, labels) require.Equal(t, err.Error(), "text format parsing error in line 4: expected float as value, got \"1027Error\"") } -func TestParseMetricsTextAndFormatErrorParsingMetricType(t *testing.T) { +func TestMetricTextToWriteRequestErrorParsingMetricType(t *testing.T) { input := bytes.NewReader([]byte(` # HELP node_info node info summary. 
# TYPE node_info info @@ -228,6 +228,6 @@ func TestParseMetricsTextAndFormatErrorParsingMetricType(t *testing.T) { `)) labels := map[string]string{"job": "promtool"} - _, err := ParseMetricsTextAndFormat(input, labels) + _, err := MetricTextToWriteRequest(input, labels) require.Equal(t, err.Error(), "text format parsing error in line 3: unknown metric type \"info\"") } From 2c6b9c4cd4b7ce8232eaefaf6b1dbdccd6aeb130 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Jun 2023 11:14:27 +0000 Subject: [PATCH 213/632] build(deps): bump github.com/influxdata/influxdb Bumps [github.com/influxdata/influxdb](https://github.com/influxdata/influxdb) from 1.11.0 to 1.11.2. - [Release notes](https://github.com/influxdata/influxdb/releases) - [Changelog](https://github.com/influxdata/influxdb/blob/master/CHANGELOG_OLD.md) - [Commits](https://github.com/influxdata/influxdb/commits) --- updated-dependencies: - dependency-name: github.com/influxdata/influxdb dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 2 +- documentation/examples/remote_storage/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index c0d433196..2972854f1 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -7,7 +7,7 @@ require ( github.com/go-kit/log v0.2.1 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/influxdata/influxdb v1.11.0 + github.com/influxdata/influxdb v1.11.2 github.com/prometheus/client_golang v1.15.0 github.com/prometheus/common v0.42.0 github.com/prometheus/prometheus v0.44.0 diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index e0eac05c1..678ddfe2c 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -118,8 +118,8 @@ github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 h1:I5xhKLePXpX github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hetznercloud/hcloud-go v1.42.0 h1:Es/CDOForQN3nOOP5Vxh1N/YHjpCg386iYEX5zCgi+A= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/influxdata/influxdb v1.11.0 h1:0X+ZsbcOWc6AEi5MHee9BYqXCKmz8IZsljrRYjmV8Qg= -github.com/influxdata/influxdb v1.11.0/go.mod h1:V93tJcidY0Zh0LtSONZWnXXGDyt20dtVf+Ddp4EnhaA= +github.com/influxdata/influxdb v1.11.2 h1:qOF3uQN1mDfJNEKwbAgJsqehf8IXgKok2vlGm736oGo= +github.com/influxdata/influxdb v1.11.2/go.mod h1:eUMkLTE2vQwvSk6KGMrTBLKPaqSuczuelGbggigMPFw= github.com/ionos-cloud/sdk-go/v6 v6.1.6 h1:0n4irdqNska+1s3YMCRhrAqKbibEgQ7SwwhAlHzYT5A= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= From 
c18f5b64872429384ef0170bf8a39f1d809b7337 Mon Sep 17 00:00:00 2001 From: SuperQ Date: Thu, 30 Mar 2023 11:04:37 +0200 Subject: [PATCH 214/632] Update golangci-lint * Update golangci-lint to v1.53.3. * Update the sync script handler for the old golanci-lint action. Signed-off-by: SuperQ --- .github/workflows/ci.yml | 2 +- Makefile.common | 2 +- scripts/golangci-lint.yml | 32 ++++++++++++++++++++++++++++++++ scripts/sync_repo_files.sh | 23 +++++++++++++++-------- 4 files changed, 49 insertions(+), 10 deletions(-) create mode 100644 scripts/golangci-lint.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bfa1ac00d..c0bccd0ef 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -150,7 +150,7 @@ jobs: uses: golangci/golangci-lint-action@v3.4.0 with: args: --verbose - version: v1.51.2 + version: v1.53.3 fuzzing: uses: ./.github/workflows/fuzzing.yml if: github.event_name == 'pull_request' diff --git a/Makefile.common b/Makefile.common index e372d3473..787feff08 100644 --- a/Makefile.common +++ b/Makefile.common @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.51.2 +GOLANGCI_LINT_VERSION ?= v1.53.3 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. 
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml new file mode 100644 index 000000000..433f71b88 --- /dev/null +++ b/scripts/golangci-lint.yml @@ -0,0 +1,32 @@ +--- +# This action is synced from https://github.com/prometheus/prometheus +name: golangci-lint +on: + push: + paths: + - "go.sum" + - "go.mod" + - "**.go" + - "scripts/errcheck_excludes.txt" + - ".github/workflows/golangci-lint.yml" + - ".golangci.yml" + pull_request: + +jobs: + golangci: + name: lint + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: install Go + uses: actions/setup-go@v3 + with: + go-version: 1.20.x + - name: Install snmp_exporter/generator dependencies + run: sudo apt-get update && sudo apt-get -y install libsnmp-dev + if: github.repository == 'prometheus/snmp_exporter' + - name: Lint + uses: golangci/golangci-lint-action@v3.4.0 + with: + version: v1.53.3 diff --git a/scripts/sync_repo_files.sh b/scripts/sync_repo_files.sh index 3c987f07e..5e32eb883 100755 --- a/scripts/sync_repo_files.sh +++ b/scripts/sync_repo_files.sh @@ -37,7 +37,7 @@ if [ -z "${GITHUB_TOKEN}" ]; then fi # List of files that should be synced. -SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint .github/workflows/golangci-lint.yml" +SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint scripts/golangci-lint.yml" # Go to the root of the repo cd "$(git rev-parse --show-cdup)" || exit 1 @@ -115,20 +115,23 @@ process_repo() { local needs_update=() for source_file in ${SYNC_FILES}; do source_checksum="$(sha256sum "${source_dir}/${source_file}" | cut -d' ' -f1)" - - target_file="$(curl -sL --fail "https://raw.githubusercontent.com/${org_repo}/${default_branch}/${source_file}")" + if [[ "${source_file}" == 'scripts/golangci-lint.yml' ]] && ! check_go "${org_repo}" "${default_branch}" ; then + echo "${org_repo} is not Go, skipping golangci-lint.yml." 
+ continue + fi if [[ "${source_file}" == 'LICENSE' ]] && ! check_license "${target_file}" ; then echo "LICENSE in ${org_repo} is not apache, skipping." continue fi - if [[ "${source_file}" == '.github/workflows/golangci-lint.yml' ]] && ! check_go "${org_repo}" "${default_branch}" ; then - echo "${org_repo} is not Go, skipping .github/workflows/golangci-lint.yml." - continue + target_filename="${source_file}" + if [[ "${source_file}" == 'scripts/golangci-lint.yml' ]] ; then + target_filename=".github/workflows/${source_file}" fi + target_file="$(curl -sL --fail "https://raw.githubusercontent.com/${org_repo}/${default_branch}/${target_filename}")" if [[ -z "${target_file}" ]]; then echo "${source_file} doesn't exist in ${org_repo}" case "${source_file}" in - CODE_OF_CONDUCT.md | SECURITY.md | .github/workflows/golangci-lint.yml) + CODE_OF_CONDUCT.md | SECURITY.md) echo "${source_file} missing in ${org_repo}, force updating." needs_update+=("${source_file}") ;; @@ -159,8 +162,12 @@ process_repo() { # Update the files in target repo by one from prometheus/prometheus. 
for source_file in "${needs_update[@]}"; do + target_filename="${source_file}" + if [[ "${source_file}" == 'scripts/golangci-lint.yml' ]] ; then + target_filename=".github/workflows/${source_file}" + fi case "${source_file}" in - *) cp -f "${source_dir}/${source_file}" "./${source_file}" ;; + *) cp -f "${source_dir}/${source_file}" "./${target_filename}" ;; esac done From 16c3cd35b1dfeafd4812d76db4cfa38f7c09ae67 Mon Sep 17 00:00:00 2001 From: SuperQ Date: Tue, 4 Apr 2023 10:28:19 +0200 Subject: [PATCH 215/632] Disable revive unused-parameter Signed-off-by: SuperQ --- .golangci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.golangci.yml b/.golangci.yml index fc2721455..128ad4f04 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -56,3 +56,9 @@ linters-settings: local-prefixes: github.com/prometheus/prometheus gofumpt: extra-rules: true + revive: + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter + - name: unused-parameter + severity: warning + disabled: true From 4e9b89f88716d04c25ded01cc653052595073c73 Mon Sep 17 00:00:00 2001 From: SuperQ Date: Tue, 27 Jun 2023 13:51:29 +0200 Subject: [PATCH 216/632] Update depguard config syntax. Signed-off-by: SuperQ --- .golangci.yml | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 128ad4f04..4a6daae59 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -31,14 +31,19 @@ issues: linters-settings: depguard: - list-type: blacklist - include-go-root: true - packages-with-error-message: - - sync/atomic: "Use go.uber.org/atomic instead of sync/atomic" - - github.com/stretchr/testify/assert: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert" - - github.com/go-kit/kit/log: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log" - - io/ioutil: "Use corresponding 'os' or 'io' functions instead." 
- - regexp: "Use github.com/grafana/regexp instead of regexp" + rules: + main: + deny: + - pkg: "sync/atomic" + desc: "Use go.uber.org/atomic instead of sync/atomic" + - pkg: "github.com/stretchr/testify/assert" + desc: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert" + - pkg: "github.com/go-kit/kit/log" + desc: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log" + - pkg: "io/ioutil" + desc: "Use corresponding 'os' or 'io' functions instead." + - pkg: "regexp" + desc: "Use github.com/grafana/regexp instead of regexp" errcheck: exclude-functions: # Don't flag lines such as "io.Copy(io.Discard, resp.Body)". From 686482ab34cf8cbbfdc88caa0edfd7cbdb8439b2 Mon Sep 17 00:00:00 2001 From: Matthias Loibl Date: Tue, 27 Jun 2023 18:10:38 +0200 Subject: [PATCH 217/632] Remove Add(0) Signed-off-by: Matthias Loibl --- web/web.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/web.go b/web/web.go index ddb5430ed..6a3eab327 100644 --- a/web/web.go +++ b/web/web.go @@ -158,7 +158,7 @@ func (m *metrics) instrumentHandlerWithPrefix(prefix string) func(handlerName st } func (m *metrics) instrumentHandler(handlerName string, handler http.HandlerFunc) http.HandlerFunc { - m.requestCounter.WithLabelValues(handlerName, "200").Add(0) + m.requestCounter.WithLabelValues(handlerName, "200") return promhttp.InstrumentHandlerCounter( m.requestCounter.MustCurryWith(prometheus.Labels{"handler": handlerName}), promhttp.InstrumentHandlerDuration( From 484a9e4071dbcd83c9bc7a878816723ce7114398 Mon Sep 17 00:00:00 2001 From: cui fliter Date: Thu, 29 Jun 2023 18:28:13 +0800 Subject: [PATCH 218/632] fix some typos (#12498) Signed-off-by: cui fliter --- CHANGELOG.md | 2 +- web/ui/react-app/public/index.html | 2 +- web/ui/react-app/src/contexts/ThemeContext.tsx | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 76b6e51b5..d316e84d3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ 
-63,7 +63,7 @@ improvements for testing. #10991 * [FEATURE] Promtool: Add HTTP client configuration to query commands. #11487 * [FEATURE] Scrape: Add `scrape_config_files` to include scrape configs from different files. #12019 * [FEATURE] HTTP client: Add `no_proxy` to exclude URLs from proxied requests. #12098 -* [FEATURE] HTTP client: Add `proxy_from_enviroment` to read proxies from env variables. #12098 +* [FEATURE] HTTP client: Add `proxy_from_environment` to read proxies from env variables. #12098 * [ENHANCEMENT] API: Add support for setting lookback delta per query via the API. #12088 * [ENHANCEMENT] API: Change HTTP status code from 503/422 to 499 if a request is canceled. #11897 * [ENHANCEMENT] Scrape: Allow exemplars for all metric types. #11984 diff --git a/web/ui/react-app/public/index.html b/web/ui/react-app/public/index.html index 57131a26b..9aeb3555e 100755 --- a/web/ui/react-app/public/index.html +++ b/web/ui/react-app/public/index.html @@ -42,7 +42,7 @@ --> TITLE_PLACEHOLDER diff --git a/web/ui/react-app/src/contexts/ThemeContext.tsx b/web/ui/react-app/src/contexts/ThemeContext.tsx index 9ee84cf5e..2f2ee7073 100644 --- a/web/ui/react-app/src/contexts/ThemeContext.tsx +++ b/web/ui/react-app/src/contexts/ThemeContext.tsx @@ -9,7 +9,7 @@ export interface ThemeCtx { setTheme: (t: themeSetting) => void; } -// defaults, will be overriden in App.tsx +// defaults, will be overridden in App.tsx export const ThemeContext = React.createContext({ theme: 'light', userPreference: 'auto', From 031d22df9e43f1c3014b8344337e133099902ae8 Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Fri, 30 Jun 2023 14:59:59 +0200 Subject: [PATCH 219/632] Fix race condition in ChunkDiskMapper.Truncate() (#12500) * Fix race condition in ChunkDiskMapper.Truncate() Signed-off-by: Marco Pracucci * Added unit test Signed-off-by: Marco Pracucci * Update tsdb/chunks/head_chunks.go Co-authored-by: Ganesh Vernekar Signed-off-by: Marco Pracucci --------- Signed-off-by: Marco Pracucci 
Co-authored-by: Ganesh Vernekar --- tsdb/chunks/head_chunks.go | 20 +++++++++---- tsdb/chunks/head_chunks_test.go | 52 +++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+), 5 deletions(-) diff --git a/tsdb/chunks/head_chunks.go b/tsdb/chunks/head_chunks.go index bcdab2125..d73eb36f8 100644 --- a/tsdb/chunks/head_chunks.go +++ b/tsdb/chunks/head_chunks.go @@ -948,12 +948,22 @@ func (cdm *ChunkDiskMapper) Truncate(fileNo uint32) error { if len(chkFileIndices) == len(removedFiles) { // All files were deleted. Reset the current sequence. cdm.evtlPosMtx.Lock() - if err == nil { - cdm.evtlPos.setSeq(0) - } else { - // In case of error, set it to the last file number on the disk that was not deleted. - cdm.evtlPos.setSeq(uint64(pendingDeletes[len(pendingDeletes)-1])) + + // We can safely reset the sequence only if the write queue is empty. If it's not empty, + // then there may be a job in the queue that will create a new segment file with an ID + // generated before the sequence reset. + // + // The queueIsEmpty() function must be called while holding the cdm.evtlPosMtx to avoid + // a race condition with WriteChunk(). + if cdm.writeQueue == nil || cdm.writeQueue.queueIsEmpty() { + if err == nil { + cdm.evtlPos.setSeq(0) + } else { + // In case of error, set it to the last file number on the disk that was not deleted. 
+ cdm.evtlPos.setSeq(uint64(pendingDeletes[len(pendingDeletes)-1])) + } } + cdm.evtlPosMtx.Unlock() } diff --git a/tsdb/chunks/head_chunks_test.go b/tsdb/chunks/head_chunks_test.go index 20a4c2064..68c133088 100644 --- a/tsdb/chunks/head_chunks_test.go +++ b/tsdb/chunks/head_chunks_test.go @@ -19,7 +19,9 @@ import ( "math/rand" "os" "strconv" + "sync" "testing" + "time" "github.com/stretchr/testify/require" @@ -356,6 +358,56 @@ func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) { verifyFiles([]int{5, 6, 7}) } +func TestChunkDiskMapper_Truncate_WriteQueueRaceCondition(t *testing.T) { + hrw := createChunkDiskMapper(t, "") + t.Cleanup(func() { + require.NoError(t, hrw.Close()) + }) + + // This test should only run when the queue is enabled. + if hrw.writeQueue == nil { + t.Skip("This test should only run when the queue is enabled") + } + + // Add an artificial delay in the writeChunk function to easily trigger the race condition. + origWriteChunk := hrw.writeQueue.writeChunk + hrw.writeQueue.writeChunk = func(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, ref ChunkDiskMapperRef, isOOO, cutFile bool) error { + time.Sleep(100 * time.Millisecond) + return origWriteChunk(seriesRef, mint, maxt, chk, ref, isOOO, cutFile) + } + + wg := sync.WaitGroup{} + wg.Add(2) + + // Write a chunk. Since the queue is enabled, the chunk will be written asynchronously (with the artificial delay). + ref := hrw.WriteChunk(1, 0, 10, randomChunk(t), false, func(err error) { + defer wg.Done() + require.NoError(t, err) + }) + + seq, _ := ref.Unpack() + require.Equal(t, 1, seq) + + // Truncate, simulating that all chunks from segment files before 1 can be dropped. + require.NoError(t, hrw.Truncate(1)) + + // Request to cut a new file when writing the next chunk. If there's a race condition, cutting a new file will + // allow us to detect there's actually an issue with the sequence number (because it's checked when a new segment + // file is created). 
+ hrw.CutNewFile() + + // Write another chunk. This will cut a new file. + ref = hrw.WriteChunk(1, 0, 10, randomChunk(t), false, func(err error) { + defer wg.Done() + require.NoError(t, err) + }) + + seq, _ = ref.Unpack() + require.Equal(t, 2, seq) + + wg.Wait() +} + // TestHeadReadWriter_TruncateAfterIterateChunksError tests for // https://github.com/prometheus/prometheus/issues/7753 func TestHeadReadWriter_TruncateAfterFailedIterateChunks(t *testing.T) { From 1a8f06bb7a89bf2aec1505f459b8e9b15224fd21 Mon Sep 17 00:00:00 2001 From: SuperQ Date: Sat, 1 Jul 2023 11:31:51 +0200 Subject: [PATCH 220/632] Update sync script. * Fix the path to the golangci-lint target filename. * Use the target filename when printing errors. * Put the yamllint filename back to previous name. Signed-off-by: SuperQ --- .yamllint.yml => .yamllint | 0 scripts/sync_repo_files.sh | 6 +++--- 2 files changed, 3 insertions(+), 3 deletions(-) rename .yamllint.yml => .yamllint (100%) diff --git a/.yamllint.yml b/.yamllint similarity index 100% rename from .yamllint.yml rename to .yamllint diff --git a/scripts/sync_repo_files.sh b/scripts/sync_repo_files.sh index 5e32eb883..d53ae8be7 100755 --- a/scripts/sync_repo_files.sh +++ b/scripts/sync_repo_files.sh @@ -125,11 +125,11 @@ process_repo() { fi target_filename="${source_file}" if [[ "${source_file}" == 'scripts/golangci-lint.yml' ]] ; then - target_filename=".github/workflows/${source_file}" + target_filename=".github/workflows/golangci-lint.yml" fi target_file="$(curl -sL --fail "https://raw.githubusercontent.com/${org_repo}/${default_branch}/${target_filename}")" if [[ -z "${target_file}" ]]; then - echo "${source_file} doesn't exist in ${org_repo}" + echo "${target_filename} doesn't exist in ${org_repo}" case "${source_file}" in CODE_OF_CONDUCT.md | SECURITY.md) echo "${source_file} missing in ${org_repo}, force updating." 
@@ -164,7 +164,7 @@ process_repo() { for source_file in "${needs_update[@]}"; do target_filename="${source_file}" if [[ "${source_file}" == 'scripts/golangci-lint.yml' ]] ; then - target_filename=".github/workflows/${source_file}" + target_filename=".github/workflows/golangci-lint.yml" fi case "${source_file}" in *) cp -f "${source_dir}/${source_file}" "./${target_filename}" ;; From 35069910f532b9078236177052425ae9605880c8 Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Sat, 1 Jul 2023 14:29:59 +0200 Subject: [PATCH 221/632] Fix infinite loop in index Writer when a series contains duplicated label names Signed-off-by: Marco Pracucci --- tsdb/index/index.go | 5 ++++- tsdb/index/index_test.go | 15 +++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 50a701d3a..ef2d167dc 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -864,7 +864,10 @@ func (w *Writer) writePostingsToTmpFiles() error { // using more memory than a single label name can. 
for len(names) > 0 { if w.labelNames[names[0]]+c > maxPostings { - break + if c > 0 { + break + } + return fmt.Errorf("corruption detected when writing postings to index: label %q has %d uses, but maxPostings is %d", names[0], w.labelNames[names[0]], maxPostings) } batchNames = append(batchNames, names[0]) c += w.labelNames[names[0]] diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go index e4cee4a55..a978ba186 100644 --- a/tsdb/index/index_test.go +++ b/tsdb/index/index_test.go @@ -471,6 +471,21 @@ func TestPersistence_index_e2e(t *testing.T) { require.NoError(t, ir.Close()) } +func TestWriter_ShouldReturnErrorOnSeriesWithDuplicatedLabelNames(t *testing.T) { + w, err := NewWriter(context.Background(), filepath.Join(t.TempDir(), "index")) + require.NoError(t, err) + + require.NoError(t, w.AddSymbol("__name__")) + require.NoError(t, w.AddSymbol("metric_1")) + require.NoError(t, w.AddSymbol("metric_2")) + + require.NoError(t, w.AddSeries(0, labels.FromStrings("__name__", "metric_1", "__name__", "metric_2"))) + + err = w.Close() + require.Error(t, err) + require.ErrorContains(t, err, "corruption detected when writing postings to index") +} + func TestDecbufUvarintWithInvalidBuffer(t *testing.T) { b := realByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81}) From 81606762d2b62a1eb2a4419927108f948e8fdd8d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Jul 2023 23:05:56 +0000 Subject: [PATCH 222/632] build(deps): bump github.com/linode/linodego from 1.17.0 to 1.17.2 Bumps [github.com/linode/linodego](https://github.com/linode/linodego) from 1.17.0 to 1.17.2. - [Release notes](https://github.com/linode/linodego/releases) - [Commits](https://github.com/linode/linodego/compare/v1.17.0...v1.17.2) --- updated-dependencies: - dependency-name: github.com/linode/linodego dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f93d4afc9..50fea8169 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( github.com/ionos-cloud/sdk-go/v6 v6.1.7 github.com/json-iterator/go v1.1.12 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b - github.com/linode/linodego v1.17.0 + github.com/linode/linodego v1.17.2 github.com/miekg/dns v1.1.54 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/oklog/run v1.1.0 diff --git a/go.sum b/go.sum index fe998a000..e529c33f9 100644 --- a/go.sum +++ b/go.sum @@ -525,8 +525,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.17.0 h1:aWS98f0jUoY2lhsEuBxRdVkqyGM0nazPd68AEDF0EvU= -github.com/linode/linodego v1.17.0/go.mod h1:/omzPxie0/YI6S0sTw1q47qDt5IYSlbO/infRR4UG+A= +github.com/linode/linodego v1.17.2 h1:b32dj4662PGG5P9qVa6nBezccWdqgukndlMIuPGq1CQ= +github.com/linode/linodego v1.17.2/go.mod h1:C2iyT3Vg2O2sPxkWka4XAQ5WSUtm5LmTZ3Adw43Ra7Q= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= From aa75c60f9a52a8b8102b49b3edafa07d15856f5d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Jul 2023 23:06:08 +0000 Subject: [PATCH 223/632] 
build(deps): bump github.com/hetznercloud/hcloud-go Bumps [github.com/hetznercloud/hcloud-go](https://github.com/hetznercloud/hcloud-go) from 1.45.1 to 1.47.0. - [Release notes](https://github.com/hetznercloud/hcloud-go/releases) - [Changelog](https://github.com/hetznercloud/hcloud-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/hetznercloud/hcloud-go/compare/v1.45.1...v1.47.0) --- updated-dependencies: - dependency-name: github.com/hetznercloud/hcloud-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index f93d4afc9..641c99185 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/consul/api v1.21.0 github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f - github.com/hetznercloud/hcloud-go v1.45.1 + github.com/hetznercloud/hcloud-go v1.47.0 github.com/ionos-cloud/sdk-go/v6 v6.1.7 github.com/json-iterator/go v1.1.12 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b @@ -43,7 +43,7 @@ require ( github.com/ovh/go-ovh v1.4.1 github.com/pkg/errors v0.9.1 github.com/prometheus/alertmanager v0.25.0 - github.com/prometheus/client_golang v1.15.1 + github.com/prometheus/client_golang v1.16.0 github.com/prometheus/client_model v0.4.0 github.com/prometheus/common v0.44.0 github.com/prometheus/common/assets v0.2.0 @@ -170,7 +170,7 @@ require ( github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect github.com/spf13/pflag v1.0.5 // indirect go.mongodb.org/mongo-driver v1.11.3 // indirect go.opencensus.io v0.24.0 // indirect diff --git a/go.sum b/go.sum index fe998a000..b2aa9f2e3 100644 --- 
a/go.sum +++ b/go.sum @@ -464,8 +464,8 @@ github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f/go.mod h1:Xjd3 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go v1.45.1 h1:nl0OOklFfQT5J6AaNIOhl5Ruh3fhmGmhvZEqHbibVuk= -github.com/hetznercloud/hcloud-go v1.45.1/go.mod h1:aAUGxSfSnB8/lVXHNEDxtCT1jykaul8kqjD7f5KQXF8= +github.com/hetznercloud/hcloud-go v1.47.0 h1:WMZDwLPtMZwOLWIgERHrrrTzRFdHx0hTygYVQ4VWHW4= +github.com/hetznercloud/hcloud-go v1.47.0/go.mod h1:zSpmBnxIdb5oMdbpVg1Q977Cq2qiwERkjj3jqRbHH5U= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -659,8 +659,8 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= -github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model 
v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -690,8 +690,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= From 4027ba3d67f230180c3758b02148e4970cbdf10c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Jul 2023 23:06:16 +0000 Subject: [PATCH 224/632] build(deps): bump google.golang.org/grpc from 1.55.0 to 1.56.1 Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.55.0 to 1.56.1. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.55.0...v1.56.1) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index f93d4afc9..2dcb5ec84 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/digitalocean/godo v1.99.0 github.com/docker/docker v24.0.2+incompatible github.com/edsrzf/mmap-go v1.1.0 - github.com/envoyproxy/go-control-plane v0.11.0 + github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f github.com/envoyproxy/protoc-gen-validate v1.0.1 github.com/fsnotify/fsnotify v1.6.0 github.com/go-kit/log v0.2.1 @@ -70,8 +70,8 @@ require ( golang.org/x/time v0.3.0 golang.org/x/tools v0.9.3 google.golang.org/api v0.114.0 - google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 - google.golang.org/grpc v1.55.0 + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 + google.golang.org/grpc v1.56.1 google.golang.org/protobuf v1.30.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -96,7 +96,7 @@ require ( ) require ( - cloud.google.com/go/compute v1.19.0 // indirect + cloud.google.com/go/compute v1.19.1 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect @@ -108,7 +108,7 @@ require ( github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74 // indirect + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/docker/distribution v2.8.1+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect diff --git a/go.sum b/go.sum index fe998a000..d4edc92c5 100644 --- a/go.sum +++ b/go.sum @@ -19,8 +19,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod 
h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= @@ -138,8 +138,8 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74 h1:zlUubfBUxApscKFsF4VSvvfhsBNTBu0eF/ddvpo96yk= -github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram 
v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -185,8 +185,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= -github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f h1:7T++XKzy4xg7PKy+bM+Sa9/oe1OC88yz2hXQUISoXfA= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.1 h1:kt9FtLiooDc0vbwTLhdg3dyNX1K9Qwa1EK9LcD4jVUQ= github.com/envoyproxy/protoc-gen-validate v1.0.1/go.mod h1:0vj8bNkYbSTNS2PIyH87KZaeN4x9zpL9Qt8fQC7d+vs= @@ -1161,8 +1161,8 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 
h1:khxVcsk/FhnzxMKOyD+TDGwjbEOpcPuIpmafPGFmhMA= -google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1184,8 +1184,8 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= -google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ= +google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 1a355ded01af32fc6d278a17678c5ed4ba6d2c5e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Jul 2023 23:10:44 +0000 Subject: [PATCH 225/632] build(deps): bump golangci/golangci-lint-action from 3.4.0 to 3.6.0 Bumps 
[golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 3.4.0 to 3.6.0. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v3.4.0...v3.6.0) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c0bccd0ef..e8b61bcad 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -147,7 +147,7 @@ jobs: run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' - name: Lint - uses: golangci/golangci-lint-action@v3.4.0 + uses: golangci/golangci-lint-action@v3.6.0 with: args: --verbose version: v1.53.3 From 5735373c9b97aa5aa47b9bab7eeb77a26bfeb7fe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Jul 2023 23:55:45 +0000 Subject: [PATCH 226/632] build(deps): bump github.com/prometheus/prometheus Bumps [github.com/prometheus/prometheus](https://github.com/prometheus/prometheus) from 0.44.0 to 0.45.0. - [Release notes](https://github.com/prometheus/prometheus/releases) - [Changelog](https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/prometheus/compare/v0.44.0...v0.45.0) --- updated-dependencies: - dependency-name: github.com/prometheus/prometheus dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 39 +++--- documentation/examples/remote_storage/go.sum | 123 +++++++++++-------- 2 files changed, 94 insertions(+), 68 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 196cad017..0dfb897ec 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -8,46 +8,55 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 github.com/influxdata/influxdb v1.11.2 - github.com/prometheus/client_golang v1.15.0 - github.com/prometheus/common v0.42.0 - github.com/prometheus/prometheus v0.44.0 + github.com/prometheus/client_golang v1.15.1 + github.com/prometheus/common v0.44.0 + github.com/prometheus/prometheus v0.45.0 github.com/stretchr/testify v1.8.4 ) require ( + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect - github.com/aws/aws-sdk-go v1.44.245 // indirect + github.com/aws/aws-sdk-go v1.44.276 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/protobuf v1.5.3 // indirect + github.com/google/uuid v1.3.0 // indirect github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect 
github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/kr/text v0.2.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 // indirect - go.opentelemetry.io/otel v1.14.0 // indirect - go.opentelemetry.io/otel/metric v0.37.0 // indirect - go.opentelemetry.io/otel/trace v1.14.0 // indirect - go.uber.org/atomic v1.10.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 // indirect + go.opentelemetry.io/otel v1.16.0 // indirect + go.opentelemetry.io/otel/metric v1.16.0 // indirect + go.opentelemetry.io/otel/trace v1.16.0 // indirect + go.uber.org/atomic v1.11.0 // indirect go.uber.org/goleak v1.2.1 // indirect - golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect - golang.org/x/net v0.9.0 // indirect - golang.org/x/oauth2 v0.7.0 // indirect - golang.org/x/sys v0.7.0 // indirect + golang.org/x/crypto v0.8.0 // indirect + golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/oauth2 v0.8.0 // indirect + golang.org/x/sys v0.8.0 // indirect golang.org/x/text v0.9.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 
657b542a7..79e1595c3 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -1,12 +1,20 @@ github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 h1:gVXuXcWd1i4C2Ruxe321aU+IKGaStvGB/S90PUPB/W8= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1/go.mod h1:DffdKW9RFqa5VgmsjUOsS7UE7eiA5iAvYUs63bhKQ0M= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 h1:T8quHYlUGyb/oqtSTwqlCr1ilJHrDv+ZtpSfo+hm1BU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1/go.mod h1:gLa1CL2RNE4s7M3yopJ/p0iq5DdY6Yv5ZUt9MTRZOQM= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= +github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 h1:oPdPEZFSbl7oSPEAIPMPBMUmiL+mqgzBJwM/9qYcwNg= +github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1/go.mod h1:4qFor3D/HDsvBME35Xy9rwW9DecL+M2sNw1ybjPtwA0= 
github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= github.com/alecthomas/kingpin/v2 v2.3.2 h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU= github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= @@ -19,8 +27,8 @@ github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAu github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.245 h1:KtY2s4q31/kn33AdV63R5t77mdxsI7rq3YT7Mgo805M= -github.com/aws/aws-sdk-go v1.44.245/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.276 h1:ywPlx9C5Yc482dUgAZ9bHpQ6onVvJvYE9FJWsNDCEy0= +github.com/aws/aws-sdk-go v1.44.276/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -28,22 +36,23 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20230112175826-46e39c7b9b43 h1:XP+uhjN0yBCN/tPkr8Z0BNDc5rZam9RG6UWyf2FrSQ0= +github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74 h1:zlUubfBUxApscKFsF4VSvvfhsBNTBu0eF/ddvpo96yk= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= -github.com/digitalocean/godo v1.98.0 h1:potyC1eD0N9n5/P4/WmJuKgg+OGYZOBWEW+/aKTX6QQ= +github.com/digitalocean/godo v1.99.0 h1:gUHO7n9bDaZFWvbzOum4bXE0/09ZuYA9yA8idQHX57E= +github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/docker v23.0.4+incompatible h1:Kd3Bh9V/rO+XpTP/BLqM+gx8z7+Yb0AA2Ibj+nNo4ek= +github.com/docker/docker v24.0.2+incompatible h1:eATx+oLz9WdNVkQrr0qjQ8HvRJ4bOOxfzEo8R+dA3cg= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= -github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +github.com/envoyproxy/protoc-gen-validate v1.0.1 h1:kt9FtLiooDc0vbwTLhdg3dyNX1K9Qwa1EK9LcD4jVUQ= github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= @@ -59,8 +68,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= 
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= @@ -72,12 +81,11 @@ github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf 
v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -100,11 +108,13 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/gophercloud/gophercloud v1.3.0 h1:RUKyCMiZoQR3VlVR5E3K7PK1AC3/qppsWYo6dtBiqs8= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gophercloud/gophercloud v1.4.0 h1:RqEu43vaX0lb0LanZr5BylK5ICVxjpFFoc0sxivyuHU= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= -github.com/hashicorp/consul/api v1.20.0 h1:9IHTjNVSZ7MIwjlW3N3a7iGiykCMDpxZu8jsxFJh0yc= +github.com/hashicorp/consul/api v1.21.0 h1:WMR2JiyuaQWRAMFaOGiYfY4Q4HRpyYRe/oYQofjyduM= github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= @@ -114,13 +124,13 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= -github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 
h1:I5xhKLePXpXgM6pZ4xZNTiurLLS3sGuZrZFFzAbM67A= +github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f h1:yxjcAZRuYymIDC0W4IQHgTe9EQdu2BsjPlVmKwyVZT4= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= -github.com/hetznercloud/hcloud-go v1.42.0 h1:Es/CDOForQN3nOOP5Vxh1N/YHjpCg386iYEX5zCgi+A= +github.com/hetznercloud/hcloud-go v1.45.1 h1:nl0OOklFfQT5J6AaNIOhl5Ruh3fhmGmhvZEqHbibVuk= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/influxdata/influxdb v1.11.2 h1:qOF3uQN1mDfJNEKwbAgJsqehf8IXgKok2vlGm736oGo= github.com/influxdata/influxdb v1.11.2/go.mod h1:eUMkLTE2vQwvSk6KGMrTBLKPaqSuczuelGbggigMPFw= -github.com/ionos-cloud/sdk-go/v6 v6.1.6 h1:0n4irdqNska+1s3YMCRhrAqKbibEgQ7SwwhAlHzYT5A= +github.com/ionos-cloud/sdk-go/v6 v6.1.7 h1:uVG1Q/ZDJ7YmCI9Oevpue9xJEH5UrUMyXv8gm7NTxIw= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -146,14 +156,16 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/linode/linodego v1.16.1 h1:5otq57M4PdHycPERRfSFZ0s1yz1ETVWGjCp3hh7+F9w= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/linode/linodego v1.17.0 h1:aWS98f0jUoY2lhsEuBxRdVkqyGM0nazPd68AEDF0EvU= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mattn/go-colorable v0.1.13 
h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw= +github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -170,6 +182,8 @@ github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -180,19 +194,19 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod 
h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM= -github.com/prometheus/client_golang v1.15.0/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= +github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= 
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -201,16 +215,17 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= -github.com/prometheus/prometheus v0.44.0 h1:sgn8Fdx+uE5tHQn0/622swlk2XnIj6udoZCnbVjHIgc= -github.com/prometheus/prometheus v0.44.0/go.mod h1:aPsmIK3py5XammeTguyqTmuqzX/jeCdyOWWobLHNKQg= +github.com/prometheus/prometheus v0.45.0 h1:O/uG+Nw4kNxx/jDPxmjsSDd+9Ohql6E7ZSY1x5x/0KI= +github.com/prometheus/prometheus v0.45.0/go.mod h1:jC5hyO8ItJBnDWGecbEucMyXjzxGv9cxsxsjS9u5s1w= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 h1:Y7xOFbD+3jaPw+VN7lkakNJ/pa+ZSQVFp1ONtJaBxns= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.17 h1:1WuWJu7/e8SqK+uQl7lfk/N/oMZTL2NE/TJsNKRNMc4= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/testify v1.2.2/go.mod 
h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -223,16 +238,16 @@ github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtX github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 h1:lE9EJyw3/JhrjWH/hEy9FptnalDQgj7vpbgC2KCCCxE= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0/go.mod h1:pcQ3MM3SWvrA71U4GDqv9UFDJ3HQsW7y5ZO3tDTlUdI= -go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= -go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= -go.opentelemetry.io/otel/metric v0.37.0 h1:pHDQuLQOZwYD+Km0eb657A25NaRzy0a+eLyKfDXedEs= -go.opentelemetry.io/otel/metric v0.37.0/go.mod h1:DmdaHfGt54iV6UKxsV9slj2bBRJcKC1B1uvDLIioc1s= -go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= -go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0/go.mod h1:XiYsayHc36K3EByOO6nbAXnAWbrUxdjUROCEeeROOH8= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= +go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= +go.opentelemetry.io/otel/metric v1.16.0 
h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= +go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -240,9 +255,10 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= @@ -263,12 +279,12 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= -golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= +golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -276,7 +292,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -292,15 +308,16 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= +golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -316,7 +333,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= +golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -325,8 +342,8 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 h1:khxVcsk/FhnzxMKOyD+TDGwjbEOpcPuIpmafPGFmhMA= 
+google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -358,8 +375,8 @@ k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ= k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ= k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= -k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d h1:VcFq5n7wCJB2FQMCIHfC+f+jNcGgNMar1uKd6rVlifU= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= k8s.io/utils v0.0.0-20230308161112-d77c459e9343 h1:m7tbIjXGcGIAtpmQr7/NAi7RsWoW3E7Zcm4jI1HicTc= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= From 5255bf06ad0816f2823560b21aeb70dd7b2046a6 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sun, 2 Jul 2023 22:16:26 +0000 Subject: [PATCH 227/632] Replace sort.Slice with faster slices.SortFunc The generic version is more efficient. 
Signed-off-by: Bryan Boreham --- cmd/promtool/tsdb.go | 4 ++-- rules/manager.go | 17 ++++++++--------- scrape/scrape.go | 6 +++--- storage/remote/read_handler.go | 6 +++--- tsdb/compact.go | 10 +++++----- tsdb/db.go | 14 +++++++------- tsdb/exemplar.go | 6 +++--- tsdb/head_read.go | 4 ++-- tsdb/index/postings.go | 8 ++++---- tsdb/index/postingsstats.go | 7 ++++--- tsdb/wlog/checkpoint.go | 6 +++--- tsdb/wlog/wlog.go | 6 +++--- 12 files changed, 47 insertions(+), 47 deletions(-) diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index b7fad5fe0..844a387c4 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -23,7 +23,6 @@ import ( "path/filepath" "runtime" "runtime/pprof" - "sort" "strconv" "strings" "sync" @@ -34,6 +33,7 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/index" + "golang.org/x/exp/slices" "github.com/alecthomas/units" "github.com/go-kit/log" @@ -447,7 +447,7 @@ func analyzeBlock(path, blockID string, limit int, runExtended bool) error { postingInfos := []postingInfo{} printInfo := func(postingInfos []postingInfo) { - sort.Slice(postingInfos, func(i, j int) bool { return postingInfos[i].metric > postingInfos[j].metric }) + slices.SortFunc(postingInfos, func(a, b postingInfo) bool { return a.metric > b.metric }) for i, pc := range postingInfos { if i >= limit { diff --git a/rules/manager.go b/rules/manager.go index 31c90e9e9..4f848a74b 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -19,7 +19,6 @@ import ( "fmt" "math" "net/url" - "sort" "sync" "time" @@ -30,6 +29,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/rulefmt" @@ -490,10 +490,9 @@ func (g *Group) AlertingRules() []*AlertingRule { alerts = append(alerts, alertingRule) } } - sort.Slice(alerts, func(i, 
j int) bool { - return alerts[i].State() > alerts[j].State() || - (alerts[i].State() == alerts[j].State() && - alerts[i].Name() < alerts[j].Name()) + slices.SortFunc(alerts, func(a, b *AlertingRule) bool { + return a.State() > b.State() || + (a.State() == b.State() && a.Name() < b.Name()) }) return alerts } @@ -1189,11 +1188,11 @@ func (m *Manager) RuleGroups() []*Group { rgs = append(rgs, g) } - sort.Slice(rgs, func(i, j int) bool { - if rgs[i].file != rgs[j].file { - return rgs[i].file < rgs[j].file + slices.SortFunc(rgs, func(a, b *Group) bool { + if a.file != b.file { + return a.file < b.file } - return rgs[i].name < rgs[j].name + return a.name < b.name }) return rgs diff --git a/scrape/scrape.go b/scrape/scrape.go index 8c4cc51e7..ec65c5ad9 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -23,7 +23,6 @@ import ( "math" "net/http" "reflect" - "sort" "strconv" "sync" "time" @@ -35,6 +34,7 @@ import ( config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -720,8 +720,8 @@ func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*re } func resolveConflictingExposedLabels(lb *labels.Builder, conflictingExposedLabels []labels.Label) { - sort.SliceStable(conflictingExposedLabels, func(i, j int) bool { - return len(conflictingExposedLabels[i].Name) < len(conflictingExposedLabels[j].Name) + slices.SortStableFunc(conflictingExposedLabels, func(a, b labels.Label) bool { + return len(a.Name) < len(b.Name) }) for _, l := range conflictingExposedLabels { diff --git a/storage/remote/read_handler.go b/storage/remote/read_handler.go index 116eb9596..aca4d7dd5 100644 --- a/storage/remote/read_handler.go +++ b/storage/remote/read_handler.go @@ -16,12 +16,12 @@ package remote import ( "context" "net/http" - "sort" "sync" "github.com/go-kit/log" 
"github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" @@ -92,8 +92,8 @@ func (h *readHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { Value: value, }) } - sort.Slice(sortedExternalLabels, func(i, j int) bool { - return sortedExternalLabels[i].Name < sortedExternalLabels[j].Name + slices.SortFunc(sortedExternalLabels, func(a, b prompb.Label) bool { + return a.Name < b.Name }) responseType, err := NegotiateResponseType(req.AcceptedResponseTypes) diff --git a/tsdb/compact.go b/tsdb/compact.go index e2b6f4c5e..0d42f627f 100644 --- a/tsdb/compact.go +++ b/tsdb/compact.go @@ -20,7 +20,6 @@ import ( "io" "os" "path/filepath" - "sort" "time" "github.com/go-kit/log" @@ -28,6 +27,7 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -200,8 +200,8 @@ func (c *LeveledCompactor) Plan(dir string) ([]string, error) { } func (c *LeveledCompactor) plan(dms []dirMeta) ([]string, error) { - sort.Slice(dms, func(i, j int) bool { - return dms[i].meta.MinTime < dms[j].meta.MinTime + slices.SortFunc(dms, func(a, b dirMeta) bool { + return a.meta.MinTime < b.meta.MinTime }) res := c.selectOverlappingDirs(dms) @@ -380,8 +380,8 @@ func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta { for s := range sources { res.Compaction.Sources = append(res.Compaction.Sources, s) } - sort.Slice(res.Compaction.Sources, func(i, j int) bool { - return res.Compaction.Sources[i].Compare(res.Compaction.Sources[j]) < 0 + slices.SortFunc(res.Compaction.Sources, func(a, b ulid.ULID) bool { + return a.Compare(b) < 0 }) res.MinTime = mint diff --git a/tsdb/db.go b/tsdb/db.go index 32dae57a5..62359a737 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -22,7 +22,6 @@ import ( 
"math" "os" "path/filepath" - "sort" "strconv" "strings" "sync" @@ -34,6 +33,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" + "golang.org/x/exp/slices" "golang.org/x/sync/errgroup" "github.com/prometheus/prometheus/config" @@ -579,8 +579,8 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) { return nil, nil } - sort.Slice(loadable, func(i, j int) bool { - return loadable[i].Meta().MinTime < loadable[j].Meta().MinTime + slices.SortFunc(loadable, func(a, b *Block) bool { + return a.Meta().MinTime < b.Meta().MinTime }) blockMetas := make([]BlockMeta, 0, len(loadable)) @@ -1445,8 +1445,8 @@ func (db *DB) reloadBlocks() (err error) { } db.metrics.blocksBytes.Set(float64(blocksSize)) - sort.Slice(toLoad, func(i, j int) bool { - return toLoad[i].Meta().MinTime < toLoad[j].Meta().MinTime + slices.SortFunc(toLoad, func(a, b *Block) bool { + return a.Meta().MinTime < b.Meta().MinTime }) // Swap new blocks first for subsequently created readers to be seen. @@ -1515,8 +1515,8 @@ func deletableBlocks(db *DB, blocks []*Block) map[ulid.ULID]struct{} { // Sort the blocks by time - newest to oldest (largest to smallest timestamp). // This ensures that the retentions will remove the oldest blocks. 
- sort.Slice(blocks, func(i, j int) bool { - return blocks[i].Meta().MaxTime > blocks[j].Meta().MaxTime + slices.SortFunc(blocks, func(a, b *Block) bool { + return a.Meta().MaxTime > b.Meta().MaxTime }) for _, block := range blocks { diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index 01718bb57..bf401da3c 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -15,11 +15,11 @@ package tsdb import ( "context" - "sort" "sync" "unicode/utf8" "github.com/prometheus/client_golang/prometheus" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" @@ -185,8 +185,8 @@ func (ce *CircularExemplarStorage) Select(start, end int64, matchers ...[]*label } } - sort.Slice(ret, func(i, j int) bool { - return labels.Compare(ret[i].SeriesLabels, ret[j].SeriesLabels) < 0 + slices.SortFunc(ret, func(a, b exemplar.QueryResult) bool { + return labels.Compare(a.SeriesLabels, b.SeriesLabels) < 0 }) return ret, nil diff --git a/tsdb/head_read.go b/tsdb/head_read.go index 0e6e005ea..c7146026a 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -137,8 +137,8 @@ func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings { return index.ErrPostings(errors.Wrap(err, "expand postings")) } - sort.Slice(series, func(i, j int) bool { - return labels.Compare(series[i].lset, series[j].lset) < 0 + slices.SortFunc(series, func(a, b *memSeries) bool { + return labels.Compare(a.lset, b.lset) < 0 }) // Convert back to list. 
diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 2ac6edbdc..3be8a1997 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -107,11 +107,11 @@ func (p *MemPostings) SortedKeys() []labels.Label { } p.mtx.RUnlock() - sort.Slice(keys, func(i, j int) bool { - if keys[i].Name != keys[j].Name { - return keys[i].Name < keys[j].Name + slices.SortFunc(keys, func(a, b labels.Label) bool { + if a.Name != b.Name { + return a.Name < b.Name } - return keys[i].Value < keys[j].Value + return a.Value < b.Value }) return keys } diff --git a/tsdb/index/postingsstats.go b/tsdb/index/postingsstats.go index 6b29bddab..8e5f62dba 100644 --- a/tsdb/index/postingsstats.go +++ b/tsdb/index/postingsstats.go @@ -15,7 +15,8 @@ package index import ( "math" - "sort" + + "golang.org/x/exp/slices" ) // Stat holds values for a single cardinality statistic. @@ -62,8 +63,8 @@ func (m *maxHeap) push(item Stat) { } func (m *maxHeap) get() []Stat { - sort.Slice(m.Items, func(i, j int) bool { - return m.Items[i].Count > m.Items[j].Count + slices.SortFunc(m.Items, func(a, b Stat) bool { + return a.Count > b.Count }) return m.Items } diff --git a/tsdb/wlog/checkpoint.go b/tsdb/wlog/checkpoint.go index 42b03f48f..fe9952a30 100644 --- a/tsdb/wlog/checkpoint.go +++ b/tsdb/wlog/checkpoint.go @@ -20,13 +20,13 @@ import ( "math" "os" "path/filepath" - "sort" "strconv" "strings" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/pkg/errors" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/tsdb/chunks" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" @@ -374,8 +374,8 @@ func listCheckpoints(dir string) (refs []checkpointRef, err error) { refs = append(refs, checkpointRef{name: fi.Name(), index: idx}) } - sort.Slice(refs, func(i, j int) bool { - return refs[i].index < refs[j].index + slices.SortFunc(refs, func(a, b checkpointRef) bool { + return a.index < b.index }) return refs, nil diff --git a/tsdb/wlog/wlog.go b/tsdb/wlog/wlog.go index 
e38cb94cb..b7b1623f9 100644 --- a/tsdb/wlog/wlog.go +++ b/tsdb/wlog/wlog.go @@ -22,7 +22,6 @@ import ( "io" "os" "path/filepath" - "sort" "strconv" "sync" "time" @@ -32,6 +31,7 @@ import ( "github.com/golang/snappy" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/tsdb/fileutil" ) @@ -859,8 +859,8 @@ func listSegments(dir string) (refs []segmentRef, err error) { } refs = append(refs, segmentRef{name: fn, index: k}) } - sort.Slice(refs, func(i, j int) bool { - return refs[i].index < refs[j].index + slices.SortFunc(refs, func(a, b segmentRef) bool { + return a.index < b.index }) for i := 0; i < len(refs)-1; i++ { if refs[i].index+1 != refs[i+1].index { From 3f230fc9f894d76cccc2dc75ea11909004c28c94 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Mon, 3 Jul 2023 15:56:06 +0300 Subject: [PATCH 228/632] promql: convert QueryOpts to interface MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Convert QueryOpts to an interface so that downstream projects like https://github.com/thanos-community/promql-engine could extend the query options with engine specific options that are not in the original engine. Will be used to enable query analysis per-query. Signed-off-by: Giedrius Statkevičius --- promql/engine.go | 43 ++++++++++++++++++++++++++++++++---------- promql/engine_test.go | 6 ++---- web/api/v1/api.go | 23 +++++++++++++--------- web/api/v1/api_test.go | 20 +++++++------------- 4 files changed, 56 insertions(+), 36 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index f29db3a64..83bbdeff8 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -130,11 +130,35 @@ type Query interface { String() string } -type QueryOpts struct { +type PrometheusQueryOpts struct { // Enables recording per-step statistics if the engine has it enabled as well. Disabled by default. 
- EnablePerStepStats bool + enablePerStepStats bool // Lookback delta duration for this query. - LookbackDelta time.Duration + lookbackDelta time.Duration +} + +var _ QueryOpts = &PrometheusQueryOpts{} + +func NewPrometheusQueryOpts(enablePerStepStats bool, lookbackDelta time.Duration) QueryOpts { + return &PrometheusQueryOpts{ + enablePerStepStats: enablePerStepStats, + lookbackDelta: lookbackDelta, + } +} + +func (p *PrometheusQueryOpts) EnablePerStepStats() bool { + return p.enablePerStepStats +} + +func (p *PrometheusQueryOpts) LookbackDelta() time.Duration { + return p.lookbackDelta +} + +type QueryOpts interface { + // Enables recording per-step statistics if the engine has it enabled as well. Disabled by default. + EnablePerStepStats() bool + // Lookback delta duration for this query. + LookbackDelta() time.Duration } // query implements the Query interface. @@ -408,7 +432,7 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) { } // NewInstantQuery returns an evaluation query for the given expression at the given time. -func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) { +func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, ts time.Time) (Query, error) { pExpr, qry := ng.newQuery(q, qs, opts, ts, ts, 0) finishQueue, err := ng.queueActive(ctx, qry) if err != nil { @@ -429,7 +453,7 @@ func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts // NewRangeQuery returns an evaluation query for the given time range and with // the resolution set by the interval. 
-func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { +func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { pExpr, qry := ng.newQuery(q, qs, opts, start, end, interval) finishQueue, err := ng.queueActive(ctx, qry) if err != nil { @@ -451,13 +475,12 @@ func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts * return qry, nil } -func (ng *Engine) newQuery(q storage.Queryable, qs string, opts *QueryOpts, start, end time.Time, interval time.Duration) (*parser.Expr, *query) { - // Default to empty QueryOpts if not provided. +func (ng *Engine) newQuery(q storage.Queryable, qs string, opts QueryOpts, start, end time.Time, interval time.Duration) (*parser.Expr, *query) { if opts == nil { - opts = &QueryOpts{} + opts = NewPrometheusQueryOpts(false, 0) } - lookbackDelta := opts.LookbackDelta + lookbackDelta := opts.LookbackDelta() if lookbackDelta <= 0 { lookbackDelta = ng.lookbackDelta } @@ -473,7 +496,7 @@ func (ng *Engine) newQuery(q storage.Queryable, qs string, opts *QueryOpts, star stmt: es, ng: ng, stats: stats.NewQueryTimers(), - sampleStats: stats.NewQuerySamples(ng.enablePerStepStats && opts.EnablePerStepStats), + sampleStats: stats.NewQuerySamples(ng.enablePerStepStats && opts.EnablePerStepStats()), queryable: q, } return &es.Expr, qry diff --git a/promql/engine_test.go b/promql/engine_test.go index 72cbf9153..5ffebc202 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -1198,7 +1198,7 @@ load 10s origMaxSamples := engine.maxSamplesPerQuery for _, c := range cases { t.Run(c.Query, func(t *testing.T) { - opts := &QueryOpts{EnablePerStepStats: true} + opts := NewPrometheusQueryOpts(true, 0) engine.maxSamplesPerQuery = origMaxSamples runQuery := func(expErr error) *stats.Statistics { @@ -4626,9 +4626,7 @@ 
metric 0 1 2 if c.engineLookback != 0 { eng.lookbackDelta = c.engineLookback } - opts := &QueryOpts{ - LookbackDelta: c.queryLookback, - } + opts := NewPrometheusQueryOpts(false, c.queryLookback) qry, err := eng.NewInstantQuery(test.context, test.Queryable(), opts, query, c.ts) require.NoError(t, err) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index ff339b09a..43ceaa6be 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -178,8 +178,13 @@ type TSDBAdminStats interface { // QueryEngine defines the interface for the *promql.Engine, so it can be replaced, wrapped or mocked. type QueryEngine interface { SetQueryLogger(l promql.QueryLogger) - NewInstantQuery(ctx context.Context, q storage.Queryable, opts *promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) - NewRangeQuery(ctx context.Context, q storage.Queryable, opts *promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) + NewInstantQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) + NewRangeQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) +} + +type QueryOpts interface { + EnablePerStepStats() bool + LookbackDelta() time.Duration } // API can register a set of endpoints in a router and handle @@ -462,18 +467,18 @@ func (api *API) formatQuery(r *http.Request) (result apiFuncResult) { return apiFuncResult{expr.Pretty(0), nil, nil, nil} } -func extractQueryOpts(r *http.Request) (*promql.QueryOpts, error) { - opts := &promql.QueryOpts{ - EnablePerStepStats: r.FormValue("stats") == "all", - } +func extractQueryOpts(r *http.Request) (promql.QueryOpts, error) { + var duration time.Duration + if strDuration := r.FormValue("lookback_delta"); strDuration != "" { - duration, err := parseDuration(strDuration) + parsedDuration, err := parseDuration(strDuration) if err != nil { return nil, 
fmt.Errorf("error parsing lookback delta duration: %w", err) } - opts.LookbackDelta = duration + duration = parsedDuration } - return opts, nil + + return promql.NewPrometheusQueryOpts(r.FormValue("stats") == "all", duration), nil } func (api *API) queryRange(r *http.Request) (result apiFuncResult) { diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 16e74071c..3aa10ee44 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -3627,7 +3627,7 @@ func TestExtractQueryOpts(t *testing.T) { tests := []struct { name string form url.Values - expect *promql.QueryOpts + expect promql.QueryOpts err error }{ { @@ -3635,9 +3635,8 @@ func TestExtractQueryOpts(t *testing.T) { form: url.Values{ "stats": []string{"all"}, }, - expect: &promql.QueryOpts{ - EnablePerStepStats: true, - }, + expect: promql.NewPrometheusQueryOpts(true, 0), + err: nil, }, { @@ -3645,10 +3644,8 @@ func TestExtractQueryOpts(t *testing.T) { form: url.Values{ "stats": []string{"none"}, }, - expect: &promql.QueryOpts{ - EnablePerStepStats: false, - }, - err: nil, + expect: promql.NewPrometheusQueryOpts(false, 0), + err: nil, }, { name: "with lookback delta", @@ -3656,11 +3653,8 @@ func TestExtractQueryOpts(t *testing.T) { "stats": []string{"all"}, "lookback_delta": []string{"30s"}, }, - expect: &promql.QueryOpts{ - EnablePerStepStats: true, - LookbackDelta: 30 * time.Second, - }, - err: nil, + expect: promql.NewPrometheusQueryOpts(true, 30*time.Second), + err: nil, }, { name: "with invalid lookback delta", From 68e59374741f104eab20a0a9eb0aaba8ea437cbc Mon Sep 17 00:00:00 2001 From: Patrick Oyarzun Date: Tue, 4 Jul 2023 04:37:58 -0500 Subject: [PATCH 229/632] Apply relevant label matchers in LabelValues before fetching extra postings (#12274) * Apply matchers when fetching label values Signed-off-by: Patrick Oyarzun * Avoid extra copying of label values Signed-off-by: Patrick Oyarzun --------- Signed-off-by: Patrick Oyarzun --- tsdb/querier.go | 20 ++++++++++++++++++++ 
tsdb/querier_bench_test.go | 1 + 2 files changed, 21 insertions(+) diff --git a/tsdb/querier.go b/tsdb/querier.go index 72b6b5141..98ee34e50 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -414,6 +414,26 @@ func labelValuesWithMatchers(r IndexReader, name string, matchers ...*labels.Mat if err != nil { return nil, errors.Wrapf(err, "fetching values of label %s", name) } + + // If we have a matcher for the label name, we can filter out values that don't match + // before we fetch postings. This is especially useful for labels with many values. + // e.g. __name__ with a selector like {__name__="xyz"} + for _, m := range matchers { + if m.Name != name { + continue + } + + // re-use the allValues slice to avoid allocations + // this is safe because the iteration is always ahead of the append + filteredValues := allValues[:0] + for _, v := range allValues { + if m.Matches(v) { + filteredValues = append(filteredValues, v) + } + } + allValues = filteredValues + } + valuesPostings := make([]index.Postings, len(allValues)) for i, value := range allValues { valuesPostings[i], err = r.Postings(name, value) diff --git a/tsdb/querier_bench_test.go b/tsdb/querier_bench_test.go index c6deaeb44..1657061fd 100644 --- a/tsdb/querier_bench_test.go +++ b/tsdb/querier_bench_test.go @@ -188,6 +188,7 @@ func benchmarkLabelValuesWithMatchers(b *testing.B, ir IndexReader) { labelName string matchers []*labels.Matcher }{ + {`i with i="1"`, "i", []*labels.Matcher{i1}}, // i has 100k values. {`i with n="1"`, "i", []*labels.Matcher{n1}}, {`i with n="^.+$"`, "i", []*labels.Matcher{nPlus}}, From e8d4466ef46d353b46b344724bedd5492cef8f76 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 4 Jul 2023 11:58:23 +0000 Subject: [PATCH 230/632] build(deps): bump bufbuild/buf-setup-action from 1.20.0 to 1.23.1 Bumps [bufbuild/buf-setup-action](https://github.com/bufbuild/buf-setup-action) from 1.20.0 to 1.23.1. 
- [Release notes](https://github.com/bufbuild/buf-setup-action/releases) - [Commits](https://github.com/bufbuild/buf-setup-action/compare/v1.20.0...v1.23.1) --- updated-dependencies: - dependency-name: bufbuild/buf-setup-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/buf-lint.yml | 2 +- .github/workflows/buf.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index a72837b79..1b05a2869 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.20.0 + - uses: bufbuild/buf-setup-action@v1.23.1 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@v1 diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index edb7936d7..3a8d1d040 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -10,7 +10,7 @@ jobs: if: github.repository_owner == 'prometheus' steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.20.0 + - uses: bufbuild/buf-setup-action@v1.23.1 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@v1 From 0f4c27e2bf4ba28707f96df18486165688341e79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Taavi=20V=C3=A4=C3=A4n=C3=A4nen?= Date: Fri, 30 Jun 2023 23:12:24 +0300 Subject: [PATCH 231/632] discovery/openstack: Include instance image ID in labels MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds a new label to include the ID of the image that an instance is using. This can be used for example to filter a job to only include instances using a certain image as that image includes some exporter. 
Sometimes the image information isn't available, such as when the image is private and the user doesn't have the roles required to see it. In those cases we just don't set the label, as the rest of the information from the discovery provider can still be used. Signed-off-by: Taavi Väänänen --- discovery/openstack/instance.go | 11 +++++++++-- discovery/openstack/instance_test.go | 2 ++ docs/configuration/configuration.md | 1 + 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/discovery/openstack/instance.go b/discovery/openstack/instance.go index 2f7e99a07..b2fe1e787 100644 --- a/discovery/openstack/instance.go +++ b/discovery/openstack/instance.go @@ -36,6 +36,7 @@ const ( openstackLabelAddressPool = openstackLabelPrefix + "address_pool" openstackLabelInstanceFlavor = openstackLabelPrefix + "instance_flavor" openstackLabelInstanceID = openstackLabelPrefix + "instance_id" + openstackLabelInstanceImage = openstackLabelPrefix + "instance_image" openstackLabelInstanceName = openstackLabelPrefix + "instance_name" openstackLabelInstanceStatus = openstackLabelPrefix + "instance_status" openstackLabelPrivateIP = openstackLabelPrefix + "private_ip" @@ -144,12 +145,18 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, openstackLabelUserID: model.LabelValue(s.UserID), } - id, ok := s.Flavor["id"].(string) + flavorId, ok := s.Flavor["id"].(string) if !ok { level.Warn(i.logger).Log("msg", "Invalid type for flavor id, expected string") continue } - labels[openstackLabelInstanceFlavor] = model.LabelValue(id) + labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorId) + + imageId, ok := s.Image["id"].(string) + if ok { + labels[openstackLabelInstanceImage] = model.LabelValue(imageId) + } + for k, v := range s.Metadata { name := strutil.SanitizeLabelName(k) labels[openstackLabelTagPrefix+model.LabelName(name)] = model.LabelValue(v) diff --git a/discovery/openstack/instance_test.go b/discovery/openstack/instance_test.go index 
d47cb0020..d2da5d968 100644 --- a/discovery/openstack/instance_test.go +++ b/discovery/openstack/instance_test.go @@ -73,6 +73,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { "__address__": model.LabelValue("10.0.0.32:0"), "__meta_openstack_instance_flavor": model.LabelValue("1"), "__meta_openstack_instance_id": model.LabelValue("ef079b0c-e610-4dfb-b1aa-b49f07ac48e5"), + "__meta_openstack_instance_image": model.LabelValue("f90f6034-2570-4974-8351-6b49732ef2eb"), "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), "__meta_openstack_instance_name": model.LabelValue("herp"), "__meta_openstack_private_ip": model.LabelValue("10.0.0.32"), @@ -85,6 +86,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { "__address__": model.LabelValue("10.0.0.31:0"), "__meta_openstack_instance_flavor": model.LabelValue("1"), "__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682ba"), + "__meta_openstack_instance_image": model.LabelValue("f90f6034-2570-4974-8351-6b49732ef2eb"), "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), "__meta_openstack_instance_name": model.LabelValue("derp"), "__meta_openstack_private_ip": model.LabelValue("10.0.0.31"), diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index b094bb4ec..30bb07a8c 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -1276,6 +1276,7 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_openstack_address_pool`: the pool of the private IP. * `__meta_openstack_instance_flavor`: the flavor of the OpenStack instance. * `__meta_openstack_instance_id`: the OpenStack instance ID. +* `__meta_openstack_instance_image`: the ID of the image the OpenStack instance is using. * `__meta_openstack_instance_name`: the OpenStack instance name. * `__meta_openstack_instance_status`: the status of the OpenStack instance. 
* `__meta_openstack_private_ip`: the private IP of the OpenStack instance. From 323ac8344490413eab79d5e85ea05245998c2a32 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 4 Jul 2023 21:43:15 +0000 Subject: [PATCH 232/632] build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azidentity Bumps [github.com/Azure/azure-sdk-for-go/sdk/azidentity](https://github.com/Azure/azure-sdk-for-go) from 1.2.1 to 1.3.0. - [Release notes](https://github.com/Azure/azure-sdk-for-go/releases) - [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md) - [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azidentity/v1.2.1...sdk/azcore/v1.3.0) --- updated-dependencies: - dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azidentity dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index b3ad4d41b..48db76291 100644 --- a/go.mod +++ b/go.mod @@ -4,8 +4,8 @@ go 1.19 require ( github.com/Azure/azure-sdk-for-go v65.0.0+incompatible - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 github.com/Azure/go-autorest/autorest v0.11.29 github.com/Azure/go-autorest/autorest/adal v0.9.23 github.com/alecthomas/kingpin/v2 v2.3.2 @@ -85,7 +85,7 @@ require ( require ( cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/hashicorp/errwrap v1.1.0 // 
indirect github.com/hashicorp/go-multierror v1.1.1 // indirect diff --git a/go.sum b/go.sum index 9c0e3266a..af10deb8b 100644 --- a/go.sum +++ b/go.sum @@ -38,10 +38,10 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 h1:gVXuXcWd1i4C2Ruxe321aU+IKGaStvGB/S90PUPB/W8= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1/go.mod h1:DffdKW9RFqa5VgmsjUOsS7UE7eiA5iAvYUs63bhKQ0M= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 h1:T8quHYlUGyb/oqtSTwqlCr1ilJHrDv+ZtpSfo+hm1BU= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1/go.mod h1:gLa1CL2RNE4s7M3yopJ/p0iq5DdY6Yv5ZUt9MTRZOQM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= @@ -66,8 +66,8 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing 
v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 h1:oPdPEZFSbl7oSPEAIPMPBMUmiL+mqgzBJwM/9qYcwNg= -github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1/go.mod h1:4qFor3D/HDsvBME35Xy9rwW9DecL+M2sNw1ybjPtwA0= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= From ac32e19bccba790554bc4ee008cf54e0a53cfa3e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 4 Jul 2023 21:43:53 +0000 Subject: [PATCH 233/632] build(deps): bump github.com/prometheus/client_golang Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.15.0 to 1.16.0. - [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v1.15.0...v1.16.0) --- updated-dependencies: - dependency-name: github.com/prometheus/client_golang dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 4 ++-- documentation/examples/remote_storage/go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 0dfb897ec..b4c8e077d 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -8,7 +8,7 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 github.com/influxdata/influxdb v1.11.2 - github.com/prometheus/client_golang v1.15.1 + github.com/prometheus/client_golang v1.16.0 github.com/prometheus/common v0.44.0 github.com/prometheus/prometheus v0.45.0 github.com/stretchr/testify v1.8.4 @@ -44,7 +44,7 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 // indirect go.opentelemetry.io/otel v1.16.0 // indirect diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 79e1595c3..2ade3a1b7 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -194,8 +194,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= 
-github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -213,8 +213,8 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/prometheus/prometheus v0.45.0 h1:O/uG+Nw4kNxx/jDPxmjsSDd+9Ohql6E7ZSY1x5x/0KI= github.com/prometheus/prometheus v0.45.0/go.mod h1:jC5hyO8ItJBnDWGecbEucMyXjzxGv9cxsxsjS9u5s1w= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= From 4851ced266886b7aa5c9667bd163f6e517b0f784 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc=20Tudur=C3=AD?= Date: Wed, 5 Jul 2023 11:44:13 +0200 Subject: [PATCH 234/632] tsdb: Support native histograms in snapshot on shutdown (#12258) Signed-off-by: Marc Tuduri --- tsdb/head_test.go | 63 ++++++++- tsdb/head_wal.go | 52 +++++-- tsdb/record/record.go | 310 
++++++++++++++++++++++-------------------- 3 files changed, 263 insertions(+), 162 deletions(-) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 8eb218b5a..282810620 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -3230,8 +3230,12 @@ func TestChunkSnapshot(t *testing.T) { numSeries := 10 expSeries := make(map[string][]tsdbutil.Sample) + expHist := make(map[string][]tsdbutil.Sample) + expFloatHist := make(map[string][]tsdbutil.Sample) expTombstones := make(map[storage.SeriesRef]tombstones.Intervals) expExemplars := make([]ex, 0) + histograms := tsdbutil.GenerateTestGaugeHistograms(481) + floatHistogram := tsdbutil.GenerateTestGaugeFloatHistograms(481) addExemplar := func(app storage.Appender, ref storage.SeriesRef, lbls labels.Labels, ts int64) { e := ex{ @@ -3250,9 +3254,21 @@ func TestChunkSnapshot(t *testing.T) { checkSamples := func() { q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64) require.NoError(t, err) - series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", ".*")) + series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*")) require.Equal(t, expSeries, series) } + checkHistograms := func() { + q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64) + require.NoError(t, err) + series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "hist", "baz.*")) + require.Equal(t, expHist, series) + } + checkFloatHistograms := func() { + q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64) + require.NoError(t, err) + series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "floathist", "bat.*")) + require.Equal(t, expFloatHist, series) + } checkTombstones := func() { tr, err := head.Tombstones() require.NoError(t, err) @@ -3301,6 +3317,8 @@ func TestChunkSnapshot(t *testing.T) { require.NoError(t, head.Init(math.MinInt64)) checkSamples() + checkHistograms() + checkFloatHistograms() checkTombstones() checkExemplars() } @@ -3311,6 +3329,11 @@ func TestChunkSnapshot(t 
*testing.T) { for i := 1; i <= numSeries; i++ { lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", i)) lblStr := lbls.String() + lblsHist := labels.FromStrings("hist", fmt.Sprintf("baz%d", i)) + lblsHistStr := lblsHist.String() + lblsFloatHist := labels.FromStrings("floathist", fmt.Sprintf("bat%d", i)) + lblsFloatHistStr := lblsFloatHist.String() + // 240 samples should m-map at least 1 chunk. for ts := int64(1); ts <= 240; ts++ { val := rand.Float64() @@ -3318,6 +3341,16 @@ func TestChunkSnapshot(t *testing.T) { ref, err := app.Append(0, lbls, ts, val) require.NoError(t, err) + hist := histograms[int(ts)] + expHist[lblsHistStr] = append(expHist[lblsHistStr], sample{ts, 0, hist, nil}) + _, err = app.AppendHistogram(0, lblsHist, ts, hist, nil) + require.NoError(t, err) + + floatHist := floatHistogram[int(ts)] + expFloatHist[lblsFloatHistStr] = append(expFloatHist[lblsFloatHistStr], sample{ts, 0, nil, floatHist}) + _, err = app.AppendHistogram(0, lblsFloatHist, ts, nil, floatHist) + require.NoError(t, err) + // Add an exemplar and to create multiple WAL records. if ts%10 == 0 { addExemplar(app, ref, lbls, ts) @@ -3371,6 +3404,11 @@ func TestChunkSnapshot(t *testing.T) { for i := 1; i <= numSeries; i++ { lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", i)) lblStr := lbls.String() + lblsHist := labels.FromStrings("hist", fmt.Sprintf("baz%d", i)) + lblsHistStr := lblsHist.String() + lblsFloatHist := labels.FromStrings("floathist", fmt.Sprintf("bat%d", i)) + lblsFloatHistStr := lblsFloatHist.String() + // 240 samples should m-map at least 1 chunk. 
for ts := int64(241); ts <= 480; ts++ { val := rand.Float64() @@ -3378,6 +3416,16 @@ func TestChunkSnapshot(t *testing.T) { ref, err := app.Append(0, lbls, ts, val) require.NoError(t, err) + hist := histograms[int(ts)] + expHist[lblsHistStr] = append(expHist[lblsHistStr], sample{ts, 0, hist, nil}) + _, err = app.AppendHistogram(0, lblsHist, ts, hist, nil) + require.NoError(t, err) + + floatHist := floatHistogram[int(ts)] + expFloatHist[lblsFloatHistStr] = append(expFloatHist[lblsFloatHistStr], sample{ts, 0, nil, floatHist}) + _, err = app.AppendHistogram(0, lblsFloatHist, ts, nil, floatHist) + require.NoError(t, err) + // Add an exemplar and to create multiple WAL records. if ts%10 == 0 { addExemplar(app, ref, lbls, ts) @@ -3468,6 +3516,19 @@ func TestSnapshotError(t *testing.T) { lbls := labels.FromStrings("foo", "bar") _, err := app.Append(0, lbls, 99, 99) require.NoError(t, err) + + // Add histograms + hist := tsdbutil.GenerateTestGaugeHistograms(1)[0] + floatHist := tsdbutil.GenerateTestGaugeFloatHistograms(1)[0] + lblsHist := labels.FromStrings("hist", "bar") + lblsFloatHist := labels.FromStrings("floathist", "bar") + + _, err = app.AppendHistogram(0, lblsHist, 99, hist, nil) + require.NoError(t, err) + + _, err = app.AppendHistogram(0, lblsFloatHist, 99, nil, floatHist) + require.NoError(t, err) + require.NoError(t, app.Commit()) // Add some tombstones. 
diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 71120c55e..2fe33befb 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -943,10 +943,12 @@ const ( ) type chunkSnapshotRecord struct { - ref chunks.HeadSeriesRef - lset labels.Labels - mc *memChunk - lastValue float64 + ref chunks.HeadSeriesRef + lset labels.Labels + mc *memChunk + lastValue float64 + lastHistogramValue *histogram.Histogram + lastFloatHistogramValue *histogram.FloatHistogram } func (s *memSeries) encodeToSnapshotRecord(b []byte) []byte { @@ -961,18 +963,27 @@ func (s *memSeries) encodeToSnapshotRecord(b []byte) []byte { if s.headChunk == nil { buf.PutUvarint(0) } else { + enc := s.headChunk.chunk.Encoding() buf.PutUvarint(1) buf.PutBE64int64(s.headChunk.minTime) buf.PutBE64int64(s.headChunk.maxTime) - buf.PutByte(byte(s.headChunk.chunk.Encoding())) + buf.PutByte(byte(enc)) buf.PutUvarintBytes(s.headChunk.chunk.Bytes()) - // Backwards compatibility for old sampleBuf which had last 4 samples. - for i := 0; i < 3; i++ { + + switch enc { + case chunkenc.EncXOR: + // Backwards compatibility for old sampleBuf which had last 4 samples. + for i := 0; i < 3; i++ { + buf.PutBE64int64(0) + buf.PutBEFloat64(0) + } buf.PutBE64int64(0) - buf.PutBEFloat64(0) + buf.PutBEFloat64(s.lastValue) + case chunkenc.EncHistogram: + record.EncodeHistogram(&buf, s.lastHistogramValue) + default: // chunkenc.FloatHistogram. + record.EncodeFloatHistogram(&buf, s.lastFloatHistogramValue) } - buf.PutBE64int64(0) - buf.PutBEFloat64(s.lastValue) } s.Unlock() @@ -1012,13 +1023,22 @@ func decodeSeriesFromChunkSnapshot(d *record.Decoder, b []byte) (csr chunkSnapsh } csr.mc.chunk = chk - // Backwards-compatibility for old sampleBuf which had last 4 samples. - for i := 0; i < 3; i++ { + switch enc { + case chunkenc.EncXOR: + // Backwards-compatibility for old sampleBuf which had last 4 samples. 
+ for i := 0; i < 3; i++ { + _ = dec.Be64int64() + _ = dec.Be64Float64() + } _ = dec.Be64int64() - _ = dec.Be64Float64() + csr.lastValue = dec.Be64Float64() + case chunkenc.EncHistogram: + csr.lastHistogramValue = &histogram.Histogram{} + record.DecodeHistogram(&dec, csr.lastHistogramValue) + default: // chunkenc.FloatHistogram. + csr.lastFloatHistogramValue = &histogram.FloatHistogram{} + record.DecodeFloatHistogram(&dec, csr.lastFloatHistogramValue) } - _ = dec.Be64int64() - csr.lastValue = dec.Be64Float64() err = dec.Err() if err != nil && len(dec.B) > 0 { @@ -1396,6 +1416,8 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie series.nextAt = csr.mc.maxTime // This will create a new chunk on append. series.headChunk = csr.mc series.lastValue = csr.lastValue + series.lastHistogramValue = csr.lastHistogramValue + series.lastFloatHistogramValue = csr.lastFloatHistogramValue app, err := series.headChunk.chunk.Appender() if err != nil { diff --git a/tsdb/record/record.go b/tsdb/record/record.go index 231b8b3c1..4cd51d46c 100644 --- a/tsdb/record/record.go +++ b/tsdb/record/record.go @@ -441,49 +441,7 @@ func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) H: &histogram.Histogram{}, } - rh.H.CounterResetHint = histogram.CounterResetHint(dec.Byte()) - - rh.H.Schema = int32(dec.Varint64()) - rh.H.ZeroThreshold = math.Float64frombits(dec.Be64()) - - rh.H.ZeroCount = dec.Uvarint64() - rh.H.Count = dec.Uvarint64() - rh.H.Sum = math.Float64frombits(dec.Be64()) - - l := dec.Uvarint() - if l > 0 { - rh.H.PositiveSpans = make([]histogram.Span, l) - } - for i := range rh.H.PositiveSpans { - rh.H.PositiveSpans[i].Offset = int32(dec.Varint64()) - rh.H.PositiveSpans[i].Length = dec.Uvarint32() - } - - l = dec.Uvarint() - if l > 0 { - rh.H.NegativeSpans = make([]histogram.Span, l) - } - for i := range rh.H.NegativeSpans { - rh.H.NegativeSpans[i].Offset = int32(dec.Varint64()) - rh.H.NegativeSpans[i].Length = dec.Uvarint32() 
- } - - l = dec.Uvarint() - if l > 0 { - rh.H.PositiveBuckets = make([]int64, l) - } - for i := range rh.H.PositiveBuckets { - rh.H.PositiveBuckets[i] = dec.Varint64() - } - - l = dec.Uvarint() - if l > 0 { - rh.H.NegativeBuckets = make([]int64, l) - } - for i := range rh.H.NegativeBuckets { - rh.H.NegativeBuckets[i] = dec.Varint64() - } - + DecodeHistogram(&dec, rh.H) histograms = append(histograms, rh) } @@ -496,6 +454,52 @@ func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) return histograms, nil } +// DecodeHistogram decodes a Histogram from a byte slice. +func DecodeHistogram(buf *encoding.Decbuf, h *histogram.Histogram) { + h.CounterResetHint = histogram.CounterResetHint(buf.Byte()) + + h.Schema = int32(buf.Varint64()) + h.ZeroThreshold = math.Float64frombits(buf.Be64()) + + h.ZeroCount = buf.Uvarint64() + h.Count = buf.Uvarint64() + h.Sum = math.Float64frombits(buf.Be64()) + + l := buf.Uvarint() + if l > 0 { + h.PositiveSpans = make([]histogram.Span, l) + } + for i := range h.PositiveSpans { + h.PositiveSpans[i].Offset = int32(buf.Varint64()) + h.PositiveSpans[i].Length = buf.Uvarint32() + } + + l = buf.Uvarint() + if l > 0 { + h.NegativeSpans = make([]histogram.Span, l) + } + for i := range h.NegativeSpans { + h.NegativeSpans[i].Offset = int32(buf.Varint64()) + h.NegativeSpans[i].Length = buf.Uvarint32() + } + + l = buf.Uvarint() + if l > 0 { + h.PositiveBuckets = make([]int64, l) + } + for i := range h.PositiveBuckets { + h.PositiveBuckets[i] = buf.Varint64() + } + + l = buf.Uvarint() + if l > 0 { + h.NegativeBuckets = make([]int64, l) + } + for i := range h.NegativeBuckets { + h.NegativeBuckets[i] = buf.Varint64() + } +} + func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogramSample) ([]RefFloatHistogramSample, error) { dec := encoding.Decbuf{B: rec} t := Type(dec.Byte()) @@ -519,49 +523,7 @@ func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogr FH: 
&histogram.FloatHistogram{}, } - rh.FH.CounterResetHint = histogram.CounterResetHint(dec.Byte()) - - rh.FH.Schema = int32(dec.Varint64()) - rh.FH.ZeroThreshold = dec.Be64Float64() - - rh.FH.ZeroCount = dec.Be64Float64() - rh.FH.Count = dec.Be64Float64() - rh.FH.Sum = dec.Be64Float64() - - l := dec.Uvarint() - if l > 0 { - rh.FH.PositiveSpans = make([]histogram.Span, l) - } - for i := range rh.FH.PositiveSpans { - rh.FH.PositiveSpans[i].Offset = int32(dec.Varint64()) - rh.FH.PositiveSpans[i].Length = dec.Uvarint32() - } - - l = dec.Uvarint() - if l > 0 { - rh.FH.NegativeSpans = make([]histogram.Span, l) - } - for i := range rh.FH.NegativeSpans { - rh.FH.NegativeSpans[i].Offset = int32(dec.Varint64()) - rh.FH.NegativeSpans[i].Length = dec.Uvarint32() - } - - l = dec.Uvarint() - if l > 0 { - rh.FH.PositiveBuckets = make([]float64, l) - } - for i := range rh.FH.PositiveBuckets { - rh.FH.PositiveBuckets[i] = dec.Be64Float64() - } - - l = dec.Uvarint() - if l > 0 { - rh.FH.NegativeBuckets = make([]float64, l) - } - for i := range rh.FH.NegativeBuckets { - rh.FH.NegativeBuckets[i] = dec.Be64Float64() - } - + DecodeFloatHistogram(&dec, rh.FH) histograms = append(histograms, rh) } @@ -574,6 +536,52 @@ func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogr return histograms, nil } +// DecodeFloatHistogram decodes a FloatHistogram from a byte slice.
+func DecodeFloatHistogram(buf *encoding.Decbuf, fh *histogram.FloatHistogram) { + fh.CounterResetHint = histogram.CounterResetHint(buf.Byte()) + + fh.Schema = int32(buf.Varint64()) + fh.ZeroThreshold = buf.Be64Float64() + + fh.ZeroCount = buf.Be64Float64() + fh.Count = buf.Be64Float64() + fh.Sum = buf.Be64Float64() + + l := buf.Uvarint() + if l > 0 { + fh.PositiveSpans = make([]histogram.Span, l) + } + for i := range fh.PositiveSpans { + fh.PositiveSpans[i].Offset = int32(buf.Varint64()) + fh.PositiveSpans[i].Length = buf.Uvarint32() + } + + l = buf.Uvarint() + if l > 0 { + fh.NegativeSpans = make([]histogram.Span, l) + } + for i := range fh.NegativeSpans { + fh.NegativeSpans[i].Offset = int32(buf.Varint64()) + fh.NegativeSpans[i].Length = buf.Uvarint32() + } + + l = buf.Uvarint() + if l > 0 { + fh.PositiveBuckets = make([]float64, l) + } + for i := range fh.PositiveBuckets { + fh.PositiveBuckets[i] = buf.Be64Float64() + } + + l = buf.Uvarint() + if l > 0 { + fh.NegativeBuckets = make([]float64, l) + } + for i := range fh.NegativeBuckets { + fh.NegativeBuckets[i] = buf.Be64Float64() + } +} + // Encoder encodes series, sample, and tombstones records. // The zero value is ready to use. 
type Encoder struct{} @@ -719,41 +727,46 @@ func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) [] buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) buf.PutVarint64(h.T - first.T) - buf.PutByte(byte(h.H.CounterResetHint)) - - buf.PutVarint64(int64(h.H.Schema)) - buf.PutBE64(math.Float64bits(h.H.ZeroThreshold)) - - buf.PutUvarint64(h.H.ZeroCount) - buf.PutUvarint64(h.H.Count) - buf.PutBE64(math.Float64bits(h.H.Sum)) - - buf.PutUvarint(len(h.H.PositiveSpans)) - for _, s := range h.H.PositiveSpans { - buf.PutVarint64(int64(s.Offset)) - buf.PutUvarint32(s.Length) - } - - buf.PutUvarint(len(h.H.NegativeSpans)) - for _, s := range h.H.NegativeSpans { - buf.PutVarint64(int64(s.Offset)) - buf.PutUvarint32(s.Length) - } - - buf.PutUvarint(len(h.H.PositiveBuckets)) - for _, b := range h.H.PositiveBuckets { - buf.PutVarint64(b) - } - - buf.PutUvarint(len(h.H.NegativeBuckets)) - for _, b := range h.H.NegativeBuckets { - buf.PutVarint64(b) - } + EncodeHistogram(&buf, h.H) } return buf.Get() } +// EncodeHistogram encodes a Histogram into a byte slice. 
+func EncodeHistogram(buf *encoding.Encbuf, h *histogram.Histogram) { + buf.PutByte(byte(h.CounterResetHint)) + + buf.PutVarint64(int64(h.Schema)) + buf.PutBE64(math.Float64bits(h.ZeroThreshold)) + + buf.PutUvarint64(h.ZeroCount) + buf.PutUvarint64(h.Count) + buf.PutBE64(math.Float64bits(h.Sum)) + + buf.PutUvarint(len(h.PositiveSpans)) + for _, s := range h.PositiveSpans { + buf.PutVarint64(int64(s.Offset)) + buf.PutUvarint32(s.Length) + } + + buf.PutUvarint(len(h.NegativeSpans)) + for _, s := range h.NegativeSpans { + buf.PutVarint64(int64(s.Offset)) + buf.PutUvarint32(s.Length) + } + + buf.PutUvarint(len(h.PositiveBuckets)) + for _, b := range h.PositiveBuckets { + buf.PutVarint64(b) + } + + buf.PutUvarint(len(h.NegativeBuckets)) + for _, b := range h.NegativeBuckets { + buf.PutVarint64(b) + } +} + func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte { buf := encoding.Encbuf{B: b} buf.PutByte(byte(FloatHistogramSamples)) @@ -772,37 +785,42 @@ func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) buf.PutVarint64(h.T - first.T) - buf.PutByte(byte(h.FH.CounterResetHint)) - - buf.PutVarint64(int64(h.FH.Schema)) - buf.PutBEFloat64(h.FH.ZeroThreshold) - - buf.PutBEFloat64(h.FH.ZeroCount) - buf.PutBEFloat64(h.FH.Count) - buf.PutBEFloat64(h.FH.Sum) - - buf.PutUvarint(len(h.FH.PositiveSpans)) - for _, s := range h.FH.PositiveSpans { - buf.PutVarint64(int64(s.Offset)) - buf.PutUvarint32(s.Length) - } - - buf.PutUvarint(len(h.FH.NegativeSpans)) - for _, s := range h.FH.NegativeSpans { - buf.PutVarint64(int64(s.Offset)) - buf.PutUvarint32(s.Length) - } - - buf.PutUvarint(len(h.FH.PositiveBuckets)) - for _, b := range h.FH.PositiveBuckets { - buf.PutBEFloat64(b) - } - - buf.PutUvarint(len(h.FH.NegativeBuckets)) - for _, b := range h.FH.NegativeBuckets { - buf.PutBEFloat64(b) - } + EncodeFloatHistogram(&buf, h.FH) } return buf.Get() } + +// EncodeFloatHistogram encodes a
FloatHistogram into a byte slice. +func EncodeFloatHistogram(buf *encoding.Encbuf, h *histogram.FloatHistogram) { + buf.PutByte(byte(h.CounterResetHint)) + + buf.PutVarint64(int64(h.Schema)) + buf.PutBEFloat64(h.ZeroThreshold) + + buf.PutBEFloat64(h.ZeroCount) + buf.PutBEFloat64(h.Count) + buf.PutBEFloat64(h.Sum) + + buf.PutUvarint(len(h.PositiveSpans)) + for _, s := range h.PositiveSpans { + buf.PutVarint64(int64(s.Offset)) + buf.PutUvarint32(s.Length) + } + + buf.PutUvarint(len(h.NegativeSpans)) + for _, s := range h.NegativeSpans { + buf.PutVarint64(int64(s.Offset)) + buf.PutUvarint32(s.Length) + } + + buf.PutUvarint(len(h.PositiveBuckets)) + for _, b := range h.PositiveBuckets { + buf.PutBEFloat64(b) + } + + buf.PutUvarint(len(h.NegativeBuckets)) + for _, b := range h.NegativeBuckets { + buf.PutBEFloat64(b) + } +} From 4edf8999da266339c0a27172976de5fd0de53596 Mon Sep 17 00:00:00 2001 From: SuperQ Date: Thu, 6 Jul 2023 10:59:03 +0200 Subject: [PATCH 235/632] Update promu Update promu to support riscv64.
Signed-off-by: SuperQ --- Makefile.common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile.common b/Makefile.common index 787feff08..0ce7ea461 100644 --- a/Makefile.common +++ b/Makefile.common @@ -55,7 +55,7 @@ ifneq ($(shell command -v gotestsum > /dev/null),) endif endif -PROMU_VERSION ?= 0.14.0 +PROMU_VERSION ?= 0.15.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz SKIP_GOLANGCI_LINT := From 7cc4292328e69c8c7c51ead620609ca26b735f6e Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Thu, 6 Jul 2023 17:48:13 +0200 Subject: [PATCH 236/632] Export MinTime and MaxTime Signed-off-by: Marco Pracucci --- web/api/v1/api.go | 37 +++++++++++++++++++++---------------- web/api/v1/api_test.go | 16 ++++++++-------- 2 files changed, 29 insertions(+), 24 deletions(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 8a6e6efdd..fe343aab1 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -561,11 +561,11 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) { } func (api *API) queryExemplars(r *http.Request) apiFuncResult { - start, err := parseTimeParam(r, "start", minTime) + start, err := parseTimeParam(r, "start", MinTime) if err != nil { return invalidParamError(err, "start") } - end, err := parseTimeParam(r, "end", maxTime) + end, err := parseTimeParam(r, "end", MaxTime) if err != nil { return invalidParamError(err, "end") } @@ -625,11 +625,11 @@ func returnAPIError(err error) *apiError { } func (api *API) labelNames(r *http.Request) apiFuncResult { - start, err := parseTimeParam(r, "start", minTime) + start, err := parseTimeParam(r, "start", MinTime) if err != nil { return invalidParamError(err, "start") } - end, err := parseTimeParam(r, "end", maxTime) + end, err := parseTimeParam(r, "end", MaxTime) if err != nil { return invalidParamError(err, "end") } @@ -691,11 +691,11 @@ func (api *API) labelValues(r *http.Request) (result 
apiFuncResult) { return apiFuncResult{nil, &apiError{errorBadData, errors.Errorf("invalid label name: %q", name)}, nil, nil} } - start, err := parseTimeParam(r, "start", minTime) + start, err := parseTimeParam(r, "start", MinTime) if err != nil { return invalidParamError(err, "start") } - end, err := parseTimeParam(r, "end", maxTime) + end, err := parseTimeParam(r, "end", MaxTime) if err != nil { return invalidParamError(err, "end") } @@ -760,11 +760,16 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { } var ( - minTime = time.Unix(math.MinInt64/1000+62135596801, 0).UTC() - maxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC() + // MinTime is the default timestamp used for the begin of optional time ranges. + // Exposed to let downstream projects to reference it. + MinTime = time.Unix(math.MinInt64/1000+62135596801, 0).UTC() - minTimeFormatted = minTime.Format(time.RFC3339Nano) - maxTimeFormatted = maxTime.Format(time.RFC3339Nano) + // MaxTime is the default timestamp used for the end of optional time ranges. + // Exposed to let downstream projects to reference it. 
+ MaxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC() + + minTimeFormatted = MinTime.Format(time.RFC3339Nano) + maxTimeFormatted = MaxTime.Format(time.RFC3339Nano) ) func (api *API) series(r *http.Request) (result apiFuncResult) { @@ -775,11 +780,11 @@ func (api *API) series(r *http.Request) (result apiFuncResult) { return apiFuncResult{nil, &apiError{errorBadData, errors.New("no match[] parameter provided")}, nil, nil} } - start, err := parseTimeParam(r, "start", minTime) + start, err := parseTimeParam(r, "start", MinTime) if err != nil { return invalidParamError(err, "start") } - end, err := parseTimeParam(r, "end", maxTime) + end, err := parseTimeParam(r, "end", MaxTime) if err != nil { return invalidParamError(err, "end") } @@ -1579,11 +1584,11 @@ func (api *API) deleteSeries(r *http.Request) apiFuncResult { return apiFuncResult{nil, &apiError{errorBadData, errors.New("no match[] parameter provided")}, nil, nil} } - start, err := parseTimeParam(r, "start", minTime) + start, err := parseTimeParam(r, "start", MinTime) if err != nil { return invalidParamError(err, "start") } - end, err := parseTimeParam(r, "end", maxTime) + end, err := parseTimeParam(r, "end", MaxTime) if err != nil { return invalidParamError(err, "end") } @@ -1738,9 +1743,9 @@ func parseTime(s string) (time.Time, error) { // Upstream issue: https://github.com/golang/go/issues/20555 switch s { case minTimeFormatted: - return minTime, nil + return MinTime, nil case maxTimeFormatted: - return maxTime, nil + return MaxTime, nil } return time.Time{}, errors.Errorf("cannot parse %q to a valid timestamp", s) } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 3aa10ee44..9105ce336 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -3059,7 +3059,7 @@ func TestParseTimeParam(t *testing.T) { { // When data is valid. 
paramName: "start", paramValue: "1582468023986", - defaultValue: minTime, + defaultValue: MinTime, result: resultType{ asTime: ts, asError: nil, @@ -3068,16 +3068,16 @@ func TestParseTimeParam(t *testing.T) { { // When data is empty string. paramName: "end", paramValue: "", - defaultValue: maxTime, + defaultValue: MaxTime, result: resultType{ - asTime: maxTime, + asTime: MaxTime, asError: nil, }, }, { // When data is not valid. paramName: "foo", paramValue: "baz", - defaultValue: maxTime, + defaultValue: MaxTime, result: resultType{ asTime: time.Time{}, asError: func() error { @@ -3148,12 +3148,12 @@ func TestParseTime(t *testing.T) { result: time.Unix(1543578564, 705*1e6), }, { - input: minTime.Format(time.RFC3339Nano), - result: minTime, + input: MinTime.Format(time.RFC3339Nano), + result: MinTime, }, { - input: maxTime.Format(time.RFC3339Nano), - result: maxTime, + input: MaxTime.Format(time.RFC3339Nano), + result: MaxTime, }, } From 578e2b6a3f70384cb7908e8bc7d50319e59ac199 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sat, 8 Jul 2023 12:39:33 +0000 Subject: [PATCH 237/632] re-order imports for linter Signed-off-by: Bryan Boreham --- cmd/promtool/tsdb.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index 844a387c4..4e27f69c0 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -29,20 +29,19 @@ import ( "text/tabwriter" "time" - "github.com/prometheus/prometheus/promql/parser" - "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/prometheus/prometheus/tsdb/index" - "golang.org/x/exp/slices" - "github.com/alecthomas/units" "github.com/go-kit/log" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/chunkenc" 
"github.com/prometheus/prometheus/tsdb/chunks" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" + "github.com/prometheus/prometheus/tsdb/index" ) const timeDelta = 30000 From c5a37ddad54762a1eaf80c820d1fff6d288bef4a Mon Sep 17 00:00:00 2001 From: haleyao Date: Sun, 9 Jul 2023 21:33:31 +0800 Subject: [PATCH 238/632] Remove deleted target from discovery manager Signed-off-by: haleyao --- discovery/legacymanager/manager.go | 7 ++++++- discovery/legacymanager/manager_test.go | 10 +++------- discovery/manager.go | 7 ++++++- discovery/manager_test.go | 15 ++------------- 4 files changed, 17 insertions(+), 22 deletions(-) diff --git a/discovery/legacymanager/manager.go b/discovery/legacymanager/manager.go index 87823f401..e7c79a8f8 100644 --- a/discovery/legacymanager/manager.go +++ b/discovery/legacymanager/manager.go @@ -270,7 +270,12 @@ func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { } for _, tg := range tgs { if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics. - m.targets[poolKey][tg.Source] = tg + // Remove the deleted target. 
+ if len(tg.Targets) == 0 && len(tg.Labels) == 0 { + delete(m.targets[poolKey], tg.Source) + } else { + m.targets[poolKey][tg.Source] = tg + } } } } diff --git a/discovery/legacymanager/manager_test.go b/discovery/legacymanager/manager_test.go index 13b84e6e3..bc8a419ec 100644 --- a/discovery/legacymanager/manager_test.go +++ b/discovery/legacymanager/manager_test.go @@ -824,13 +824,9 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { if !ok { t.Fatalf("'%v' should be present in target groups", pkey) } - group, ok := targetGroups[""] - if !ok { - t.Fatalf("missing '' key in target groups %v", targetGroups) - } - - if len(group.Targets) != 0 { - t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets)) + _, ok = targetGroups[""] + if ok { + t.Fatalf("Target groups should be empty, got %v", targetGroups) } } diff --git a/discovery/manager.go b/discovery/manager.go index 8b304a0fa..7f06b423d 100644 --- a/discovery/manager.go +++ b/discovery/manager.go @@ -387,7 +387,12 @@ func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { } for _, tg := range tgs { if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics. - m.targets[poolKey][tg.Source] = tg + // Remove the deleted target. 
+ if len(tg.Targets) == 0 && len(tg.Labels) == 0 { + delete(m.targets[poolKey], tg.Source) + } else { + m.targets[poolKey][tg.Source] = tg + } } } } diff --git a/discovery/manager_test.go b/discovery/manager_test.go index 537160811..67ccbcac7 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -1044,19 +1044,8 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { if !ok { t.Fatalf("'%v' should be present in target groups", p) } - group, ok := targetGroups[""] - if !ok { - t.Fatalf("missing '' key in target groups %v", targetGroups) - } - - if len(group.Targets) != 0 { - t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets)) - } - require.Equal(t, 1, len(syncedTargets)) - require.Equal(t, 1, len(syncedTargets["prometheus"])) - if lbls := syncedTargets["prometheus"][0].Labels; lbls != nil { - t.Fatalf("Unexpected Group: expected nil Labels, got %v", lbls) - } + require.Equal(t, 0, len(targetGroups)) + require.Equal(t, 0, len(syncedTargets)) } func TestIdenticalConfigurationsAreCoalesced(t *testing.T) { From ce153e3fffde388712629d205663dd4af55013db Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sat, 8 Jul 2023 12:45:56 +0000 Subject: [PATCH 239/632] Replace sort.Sort with faster slices.SortFunc The generic version is more efficient. 
Signed-off-by: Bryan Boreham --- promql/quantile.go | 10 +++++----- storage/remote/codec.go | 11 ++++------- tsdb/head_read.go | 2 +- tsdb/index/index.go | 8 +------- tsdb/ooo_head_read.go | 31 +++++++++++-------------------- tsdb/ooo_head_read_test.go | 7 ++++--- util/stats/timer.go | 33 ++++++++------------------------- web/federate.go | 19 ++++++------------- 8 files changed, 40 insertions(+), 81 deletions(-) diff --git a/promql/quantile.go b/promql/quantile.go index 78d0bbaf0..d80345e81 100644 --- a/promql/quantile.go +++ b/promql/quantile.go @@ -17,6 +17,8 @@ import ( "math" "sort" + "golang.org/x/exp/slices" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" ) @@ -38,10 +40,6 @@ type bucket struct { // buckets implements sort.Interface. type buckets []bucket -func (b buckets) Len() int { return len(b) } -func (b buckets) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b buckets) Less(i, j int) bool { return b[i].upperBound < b[j].upperBound } - type metricWithBuckets struct { metric labels.Labels buckets buckets @@ -83,7 +81,9 @@ func bucketQuantile(q float64, buckets buckets) float64 { if q > 1 { return math.Inf(+1) } - sort.Sort(buckets) + slices.SortFunc(buckets, func(a, b bucket) bool { + return a.upperBound < b.upperBound + }) if !math.IsInf(buckets[len(buckets)-1].upperBound, +1) { return math.NaN() } diff --git a/storage/remote/codec.go b/storage/remote/codec.go index 6a58ec4ac..3f426204e 100644 --- a/storage/remote/codec.go +++ b/storage/remote/codec.go @@ -26,6 +26,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/common/model" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" @@ -178,7 +179,9 @@ func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet } if sortSeries { - sort.Sort(byLabel(series)) + slices.SortFunc(series, func(a, b storage.Series) 
bool { + return labels.Compare(a.Labels(), b.Labels()) < 0 + }) } return &concreteSeriesSet{ series: series, @@ -313,12 +316,6 @@ func MergeLabels(primary, secondary []prompb.Label) []prompb.Label { return result } -type byLabel []storage.Series - -func (a byLabel) Len() int { return len(a) } -func (a byLabel) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byLabel) Less(i, j int) bool { return labels.Compare(a[i].Labels(), a[j].Labels()) < 0 } - // errSeriesSet implements storage.SeriesSet, just returning an error. type errSeriesSet struct { err error diff --git a/tsdb/head_read.go b/tsdb/head_read.go index 0e6e005ea..8d0a35fc2 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -450,7 +450,7 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper // Next we want to sort all the collected chunks by min time so we can find // those that overlap and stop when we know the rest don't. - sort.Sort(byMinTimeAndMinRef(tmpChks)) + slices.SortFunc(tmpChks, refLessByMinTimeAndMinRef) mc := &mergedOOOChunks{} absoluteMax := int64(math.MinInt64) diff --git a/tsdb/index/index.go b/tsdb/index/index.go index ef2d167dc..d7650a8b3 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -924,7 +924,7 @@ func (w *Writer) writePostingsToTmpFiles() error { values = append(values, v) } // Symbol numbers are in order, so the strings will also be in order. 
- sort.Sort(uint32slice(values)) + slices.Sort(values) for _, v := range values { value, err := w.symbols.Lookup(v) if err != nil { @@ -1017,12 +1017,6 @@ func (w *Writer) writePostings() error { return nil } -type uint32slice []uint32 - -func (s uint32slice) Len() int { return len(s) } -func (s uint32slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s uint32slice) Less(i, j int) bool { return s[i] < s[j] } - type labelIndexHashEntry struct { keys []string offset uint64 diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index 8ba3ea39a..8030fc367 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -17,7 +17,8 @@ package tsdb import ( "errors" "math" - "sort" + + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" @@ -130,7 +131,7 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra // Next we want to sort all the collected chunks by min time so we can find // those that overlap. - sort.Sort(metaByMinTimeAndMinRef(tmpChks)) + slices.SortFunc(tmpChks, lessByMinTimeAndMinRef) // Next we want to iterate the sorted collected chunks and only return the // chunks Meta the first chunk that overlaps with others. 
@@ -175,30 +176,20 @@ type chunkMetaAndChunkDiskMapperRef struct { origMaxT int64 } -type byMinTimeAndMinRef []chunkMetaAndChunkDiskMapperRef - -func (b byMinTimeAndMinRef) Len() int { return len(b) } -func (b byMinTimeAndMinRef) Less(i, j int) bool { - if b[i].meta.MinTime == b[j].meta.MinTime { - return b[i].meta.Ref < b[j].meta.Ref +func refLessByMinTimeAndMinRef(a, b chunkMetaAndChunkDiskMapperRef) bool { + if a.meta.MinTime == b.meta.MinTime { + return a.meta.Ref < b.meta.Ref } - return b[i].meta.MinTime < b[j].meta.MinTime + return a.meta.MinTime < b.meta.MinTime } -func (b byMinTimeAndMinRef) Swap(i, j int) { b[i], b[j] = b[j], b[i] } - -type metaByMinTimeAndMinRef []chunks.Meta - -func (b metaByMinTimeAndMinRef) Len() int { return len(b) } -func (b metaByMinTimeAndMinRef) Less(i, j int) bool { - if b[i].MinTime == b[j].MinTime { - return b[i].Ref < b[j].Ref +func lessByMinTimeAndMinRef(a, b chunks.Meta) bool { + if a.MinTime == b.MinTime { + return a.Ref < b.Ref } - return b[i].MinTime < b[j].MinTime + return a.MinTime < b.MinTime } -func (b metaByMinTimeAndMinRef) Swap(i, j int) { b[i], b[j] = b[j], b[i] } - func (oh *OOOHeadIndexReader) Postings(name string, values ...string) (index.Postings, error) { switch len(values) { case 0: diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index f3ec862f5..ed9ca2769 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -22,6 +22,7 @@ import ( "time" "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" @@ -337,7 +338,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { } expChunks = append(expChunks, meta) } - sort.Sort(metaByMinTimeAndMinRef(expChunks)) // we always want the chunks to come back sorted by minTime asc + slices.SortFunc(expChunks, lessByMinTimeAndMinRef) // We always want the chunks to come back sorted by minTime asc. 
if headChunk && len(intervals) > 0 { // Put the last interval in the head chunk @@ -1116,7 +1117,7 @@ func TestSortByMinTimeAndMinRef(t *testing.T) { for _, tc := range tests { t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) { - sort.Sort(byMinTimeAndMinRef(tc.input)) + slices.SortFunc(tc.input, refLessByMinTimeAndMinRef) require.Equal(t, tc.exp, tc.input) }) } @@ -1180,7 +1181,7 @@ func TestSortMetaByMinTimeAndMinRef(t *testing.T) { for _, tc := range tests { t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) { - sort.Sort(metaByMinTimeAndMinRef(tc.inputMetas)) + slices.SortFunc(tc.inputMetas, lessByMinTimeAndMinRef) require.Equal(t, tc.expMetas, tc.inputMetas) }) } diff --git a/util/stats/timer.go b/util/stats/timer.go index e47162680..df1e2f931 100644 --- a/util/stats/timer.go +++ b/util/stats/timer.go @@ -16,8 +16,9 @@ package stats import ( "bytes" "fmt" - "sort" "time" + + "golang.org/x/exp/slices" ) // A Timer that can be started and stopped and accumulates the total time it @@ -78,35 +79,17 @@ func (t *TimerGroup) GetTimer(name fmt.Stringer) *Timer { return timer } -// Timers is a slice of Timer pointers that implements Len and Swap from -// sort.Interface. -type Timers []*Timer - -type byCreationTimeSorter struct{ Timers } - -// Len implements sort.Interface. -func (t Timers) Len() int { - return len(t) -} - -// Swap implements sort.Interface. -func (t Timers) Swap(i, j int) { - t[i], t[j] = t[j], t[i] -} - -func (s byCreationTimeSorter) Less(i, j int) bool { - return s.Timers[i].created < s.Timers[j].created -} - // Return a string representation of a TimerGroup. 
func (t *TimerGroup) String() string { - timers := byCreationTimeSorter{} + timers := make([]*Timer, 0, len(t.timers)) for _, timer := range t.timers { - timers.Timers = append(timers.Timers, timer) + timers = append(timers, timer) } - sort.Sort(timers) + slices.SortFunc(timers, func(a, b *Timer) bool { + return a.created < b.created + }) result := &bytes.Buffer{} - for _, timer := range timers.Timers { + for _, timer := range timers { fmt.Fprintf(result, "%s\n", timer) } return result.String() diff --git a/web/federate.go b/web/federate.go index b0f4c0610..1c50faed0 100644 --- a/web/federate.go +++ b/web/federate.go @@ -25,6 +25,7 @@ import ( dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -166,7 +167,11 @@ Loop: return } - sort.Sort(byName(vec)) + slices.SortFunc(vec, func(a, b promql.Sample) bool { + ni := a.Metric.Get(labels.MetricName) + nj := b.Metric.Get(labels.MetricName) + return ni < nj + }) externalLabels := h.config.GlobalConfig.ExternalLabels.Map() if _, ok := externalLabels[model.InstanceLabel]; !ok { @@ -313,15 +318,3 @@ Loop: } } } - -// byName makes a model.Vector sortable by metric name. 
-type byName promql.Vector - -func (vec byName) Len() int { return len(vec) } -func (vec byName) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } - -func (vec byName) Less(i, j int) bool { - ni := vec[i].Metric.Get(labels.MetricName) - nj := vec[j].Metric.Get(labels.MetricName) - return ni < nj -} From 70e41fc5ac008d623820dcacdbbcbedbd64ca11f Mon Sep 17 00:00:00 2001 From: Merrick Clay Date: Mon, 10 Jul 2023 16:50:16 -0600 Subject: [PATCH 240/632] improve incorrect doc comment Signed-off-by: Merrick Clay --- tsdb/head.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsdb/head.go b/tsdb/head.go index a1d61fd6a..499be067a 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -977,7 +977,7 @@ func (h *Head) DisableNativeHistograms() { h.opts.EnableNativeHistograms.Store(false) } -// PostingsCardinalityStats returns top 10 highest cardinality stats By label and value names. +// PostingsCardinalityStats returns highest cardinality stats by label and value names. func (h *Head) PostingsCardinalityStats(statsByLabelName string, limit int) *index.PostingsStats { h.cardinalityMutex.Lock() defer h.cardinalityMutex.Unlock() From fc2e4cd3b9ca1d924dce714b091c9bd5c97926fa Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Tue, 11 Jul 2023 13:13:35 +0200 Subject: [PATCH 241/632] docs: Fix link to feature flags. Signed-off-by: Julien Pivotto --- docs/querying/basics.md | 2 +- docs/querying/operators.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/querying/basics.md b/docs/querying/basics.md index c3aa7b804..5e9ddc119 100644 --- a/docs/querying/basics.md +++ b/docs/querying/basics.md @@ -35,7 +35,7 @@ vector is the only type that can be directly graphed. _Notes about the experimental native histograms:_ * Ingesting native histograms has to be enabled via a [feature - flag](../../feature_flags/#native-histograms). + flag](../feature_flags.md#native-histograms). 
* Once native histograms have been ingested into the TSDB (and even after disabling the feature flag again), both instant vectors and range vectors may now contain samples that aren't simple floating point numbers (float samples) diff --git a/docs/querying/operators.md b/docs/querying/operators.md index 0cd536894..5eb549051 100644 --- a/docs/querying/operators.md +++ b/docs/querying/operators.md @@ -310,7 +310,7 @@ so `2 ^ 3 ^ 2` is equivalent to `2 ^ (3 ^ 2)`. ## Operators for native histograms Native histograms are an experimental feature. Ingesting native histograms has -to be enabled via a [feature flag](../feature_flags/#native-histograms). Once +to be enabled via a [feature flag](../feature_flags.md#native-histograms). Once native histograms have been ingested, they can be queried (even after the feature flag has been disabled again). However, the operator support for native histograms is still very limited. From bf5bf1a4b3460fb35ef0a93f783a5251317c96f7 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Tue, 11 Jul 2023 14:29:31 +0200 Subject: [PATCH 242/632] TSDB: Remove usused import of sort Signed-off-by: Julien Pivotto --- tsdb/head_read.go | 1 - 1 file changed, 1 deletion(-) diff --git a/tsdb/head_read.go b/tsdb/head_read.go index 55e16ad12..b2af74ace 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -16,7 +16,6 @@ package tsdb import ( "context" "math" - "sort" "sync" "github.com/go-kit/log/level" From 32d87282addfaa2af9f7a6b953d17f3da6101d66 Mon Sep 17 00:00:00 2001 From: Justin Lei Date: Tue, 11 Jul 2023 05:57:57 -0700 Subject: [PATCH 243/632] Add Zstandard compression option for wlog (#11666) Snappy remains as the default compression but there is now a flag to switch the compression algorithm. 
Signed-off-by: Justin Lei --- cmd/prometheus/main.go | 13 ++- go.mod | 1 + go.sum | 2 + tsdb/agent/db.go | 10 ++- tsdb/compact_test.go | 7 +- tsdb/db.go | 6 +- tsdb/db_test.go | 12 +-- tsdb/head_test.go | 164 +++++++++++++++++------------------ tsdb/head_wal.go | 2 +- tsdb/ooo_head_read_test.go | 5 +- tsdb/wal.go | 2 +- tsdb/wal_test.go | 4 +- tsdb/wlog/checkpoint.go | 2 +- tsdb/wlog/checkpoint_test.go | 6 +- tsdb/wlog/live_reader.go | 53 ++++++----- tsdb/wlog/reader.go | 36 +++++--- tsdb/wlog/reader_test.go | 14 +-- tsdb/wlog/watcher_test.go | 28 +++--- tsdb/wlog/wlog.go | 90 ++++++++++++++----- tsdb/wlog/wlog_test.go | 52 ++++++----- 20 files changed, 302 insertions(+), 207 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 3d723f152..debc0d3f9 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -71,6 +71,7 @@ import ( "github.com/prometheus/prometheus/tracing" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/agent" + "github.com/prometheus/prometheus/tsdb/wlog" "github.com/prometheus/prometheus/util/documentcli" "github.com/prometheus/prometheus/util/logging" prom_runtime "github.com/prometheus/prometheus/util/runtime" @@ -334,6 +335,9 @@ func main() { serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL."). Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression) + serverOnlyFlag(a, "storage.tsdb.wal-compression-type", "Compression algorithm for the tsdb WAL."). + Hidden().Default(string(wlog.CompressionSnappy)).EnumVar(&cfg.tsdb.WALCompressionType, string(wlog.CompressionSnappy), string(wlog.CompressionZstd)) + serverOnlyFlag(a, "storage.tsdb.head-chunks-write-queue-size", "Size of the queue through which head chunks are written to the disk to be m-mapped, 0 disables the queue completely. Experimental."). 
Default("0").IntVar(&cfg.tsdb.HeadChunksWriteQueueSize) @@ -350,6 +354,9 @@ func main() { agentOnlyFlag(a, "storage.agent.wal-compression", "Compress the agent WAL."). Default("true").BoolVar(&cfg.agent.WALCompression) + agentOnlyFlag(a, "storage.agent.wal-compression-type", "Compression algorithm for the agent WAL."). + Hidden().Default(string(wlog.CompressionSnappy)).EnumVar(&cfg.agent.WALCompressionType, string(wlog.CompressionSnappy), string(wlog.CompressionZstd)) + agentOnlyFlag(a, "storage.agent.wal-truncate-frequency", "The frequency at which to truncate the WAL and remove old data."). Hidden().PlaceHolder("").SetValue(&cfg.agent.TruncateFrequency) @@ -1546,6 +1553,7 @@ type tsdbOptions struct { MaxBytes units.Base2Bytes NoLockfile bool WALCompression bool + WALCompressionType string HeadChunksWriteQueueSize int SamplesPerChunk int StripeSize int @@ -1566,7 +1574,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { MaxBytes: int64(opts.MaxBytes), NoLockfile: opts.NoLockfile, AllowOverlappingCompaction: true, - WALCompression: opts.WALCompression, + WALCompression: wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType), HeadChunksWriteQueueSize: opts.HeadChunksWriteQueueSize, SamplesPerChunk: opts.SamplesPerChunk, StripeSize: opts.StripeSize, @@ -1585,6 +1593,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { type agentOptions struct { WALSegmentSize units.Base2Bytes WALCompression bool + WALCompressionType string StripeSize int TruncateFrequency model.Duration MinWALTime, MaxWALTime model.Duration @@ -1594,7 +1603,7 @@ type agentOptions struct { func (opts agentOptions) ToAgentOptions() agent.Options { return agent.Options{ WALSegmentSize: int(opts.WALSegmentSize), - WALCompression: opts.WALCompression, + WALCompression: wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType), StripeSize: opts.StripeSize, TruncateFrequency: time.Duration(opts.TruncateFrequency), MinWALTime: 
durationToInt64Millis(time.Duration(opts.MinWALTime)), diff --git a/go.mod b/go.mod index 48db76291..4597ee420 100644 --- a/go.mod +++ b/go.mod @@ -34,6 +34,7 @@ require ( github.com/hetznercloud/hcloud-go v1.47.0 github.com/ionos-cloud/sdk-go/v6 v6.1.7 github.com/json-iterator/go v1.1.12 + github.com/klauspost/compress v1.15.12 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b github.com/linode/linodego v1.17.2 github.com/miekg/dns v1.1.54 diff --git a/go.sum b/go.sum index af10deb8b..57f9f82c6 100644 --- a/go.sum +++ b/go.sum @@ -507,6 +507,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM= +github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 13cad6bfc..d47095a23 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -65,8 +65,8 @@ type Options struct { // WALSegmentSize > 0, segment size is WALSegmentSize. WALSegmentSize int - // WALCompression will turn on Snappy compression for records on the WAL. - WALCompression bool + // WALCompression configures the compression type to use on records in the WAL. + WALCompression wlog.CompressionType // StripeSize is the size (power of 2) in entries of the series hash map. 
Reducing the size will save memory but impact performance. StripeSize int @@ -87,7 +87,7 @@ type Options struct { func DefaultOptions() *Options { return &Options{ WALSegmentSize: wlog.DefaultSegmentSize, - WALCompression: false, + WALCompression: wlog.CompressionNone, StripeSize: tsdb.DefaultStripeSize, TruncateFrequency: DefaultTruncateFrequency, MinWALTime: DefaultMinWALTime, @@ -318,6 +318,10 @@ func validateOptions(opts *Options) *Options { opts.WALSegmentSize = wlog.DefaultSegmentSize } + if opts.WALCompression == "" { + opts.WALCompression = wlog.CompressionNone + } + // Revert Stripesize to DefaultStripsize if Stripsize is either 0 or not a power of 2. if opts.StripeSize <= 0 || ((opts.StripeSize & (opts.StripeSize - 1)) != 0) { opts.StripeSize = tsdb.DefaultStripeSize diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index 27a5cdfa8..d20918268 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -39,6 +39,7 @@ import ( "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/tombstones" "github.com/prometheus/prometheus/tsdb/tsdbutil" + "github.com/prometheus/prometheus/tsdb/wlog" ) func TestSplitByRange(t *testing.T) { @@ -1306,7 +1307,7 @@ func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) { func TestHeadCompactionWithHistograms(t *testing.T) { for _, floatTest := range []bool{true, false} { t.Run(fmt.Sprintf("float=%t", floatTest), func(t *testing.T) { - head, _ := newTestHead(t, DefaultBlockDuration, false, false) + head, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) require.NoError(t, head.Init(0)) t.Cleanup(func() { require.NoError(t, head.Close()) @@ -1485,11 +1486,11 @@ func TestSparseHistogramSpaceSavings(t *testing.T) { c.numBuckets, ), func(t *testing.T) { - oldHead, _ := newTestHead(t, DefaultBlockDuration, false, false) + oldHead, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) t.Cleanup(func() { require.NoError(t, oldHead.Close()) }) - 
sparseHead, _ := newTestHead(t, DefaultBlockDuration, false, false) + sparseHead, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) t.Cleanup(func() { require.NoError(t, sparseHead.Close()) }) diff --git a/tsdb/db.go b/tsdb/db.go index 62359a737..2ca6034a0 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -77,8 +77,8 @@ func DefaultOptions() *Options { MaxBlockDuration: DefaultBlockDuration, NoLockfile: false, AllowOverlappingCompaction: true, - WALCompression: false, SamplesPerChunk: DefaultSamplesPerChunk, + WALCompression: wlog.CompressionNone, StripeSize: DefaultStripeSize, HeadChunksWriteBufferSize: chunks.DefaultWriteBufferSize, IsolationDisabled: defaultIsolationDisabled, @@ -123,8 +123,8 @@ type Options struct { // For Prometheus, this will always be true. AllowOverlappingCompaction bool - // WALCompression will turn on Snappy compression for records on the WAL. - WALCompression bool + // WALCompression configures the compression type to use on records in the WAL. + WALCompression wlog.CompressionType // Maximum number of CPUs that can simultaneously processes WAL replay. // If it is <=0, then GOMAXPROCS is used. 
diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 427a3b7af..c746d5018 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -1965,7 +1965,7 @@ func TestInitializeHeadTimestamp(t *testing.T) { dir := t.TempDir() require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777)) - w, err := wlog.New(nil, nil, path.Join(dir, "wal"), false) + w, err := wlog.New(nil, nil, path.Join(dir, "wal"), wlog.CompressionNone) require.NoError(t, err) var enc record.Encoder @@ -2007,7 +2007,7 @@ func TestInitializeHeadTimestamp(t *testing.T) { createBlock(t, dir, genSeries(1, 1, 1000, 6000)) require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777)) - w, err := wlog.New(nil, nil, path.Join(dir, "wal"), false) + w, err := wlog.New(nil, nil, path.Join(dir, "wal"), wlog.CompressionNone) require.NoError(t, err) var enc record.Encoder @@ -2408,7 +2408,7 @@ func TestDBReadOnly(t *testing.T) { } // Add head to test DBReadOnly WAL reading capabilities. - w, err := wlog.New(logger, nil, filepath.Join(dbDir, "wal"), true) + w, err := wlog.New(logger, nil, filepath.Join(dbDir, "wal"), wlog.CompressionSnappy) require.NoError(t, err) h := createHead(t, w, genSeries(1, 1, 16, 18), dbDir) require.NoError(t, h.Close()) @@ -2972,7 +2972,7 @@ func TestCompactHead(t *testing.T) { NoLockfile: true, MinBlockDuration: int64(time.Hour * 2 / time.Millisecond), MaxBlockDuration: int64(time.Hour * 2 / time.Millisecond), - WALCompression: true, + WALCompression: wlog.CompressionSnappy, } db, err := Open(dbDir, log.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil) @@ -3912,7 +3912,7 @@ func TestMetadataCheckpointingOnlyKeepsLatestEntry(t *testing.T) { ctx := context.Background() numSamples := 10000 - hb, w := newTestHead(t, int64(numSamples)*10, false, false) + hb, w := newTestHead(t, int64(numSamples)*10, wlog.CompressionNone, false) // Add some series so we can append metadata to them. 
app := hb.Appender(ctx) @@ -5099,7 +5099,7 @@ func TestWBLAndMmapReplay(t *testing.T) { resetMmapToOriginal() // We neet to reset because new duplicate chunks can be written above. // Removing m-map markers in WBL by rewriting it. - newWbl, err := wlog.New(log.NewNopLogger(), nil, filepath.Join(t.TempDir(), "new_wbl"), false) + newWbl, err := wlog.New(log.NewNopLogger(), nil, filepath.Join(t.TempDir(), "new_wbl"), wlog.CompressionNone) require.NoError(t, err) sr, err := wlog.NewSegmentsReader(originalWblDir) require.NoError(t, err) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 282810620..f9eb88ba1 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -52,7 +52,7 @@ import ( "github.com/prometheus/prometheus/tsdb/wlog" ) -func newTestHead(t testing.TB, chunkRange int64, compressWAL, oooEnabled bool) (*Head, *wlog.WL) { +func newTestHead(t testing.TB, chunkRange int64, compressWAL wlog.CompressionType, oooEnabled bool) (*Head, *wlog.WL) { dir := t.TempDir() wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, compressWAL) require.NoError(t, err) @@ -79,7 +79,7 @@ func newTestHead(t testing.TB, chunkRange int64, compressWAL, oooEnabled bool) ( func BenchmarkCreateSeries(b *testing.B) { series := genSeries(b.N, 10, 0, 0) - h, _ := newTestHead(b, 10000, false, false) + h, _ := newTestHead(b, 10000, wlog.CompressionNone, false) b.Cleanup(func() { require.NoError(b, h.Close()) }) @@ -100,7 +100,7 @@ func BenchmarkHeadAppender_Append_Commit_ExistingSeries(b *testing.B) { b.Run(fmt.Sprintf("%d series", seriesCount), func(b *testing.B) { for _, samplesPerAppend := range []int64{1, 2, 5, 100} { b.Run(fmt.Sprintf("%d samples per append", samplesPerAppend), func(b *testing.B) { - h, _ := newTestHead(b, 10000, false, false) + h, _ := newTestHead(b, 10000, wlog.CompressionNone, false) b.Cleanup(func() { require.NoError(b, h.Close()) }) ts := int64(1000) @@ -245,7 +245,7 @@ func BenchmarkLoadWAL(b *testing.B) { func(b *testing.B) { dir := b.TempDir() 
- w, err := wlog.New(nil, nil, dir, false) + w, err := wlog.New(nil, nil, dir, wlog.CompressionNone) require.NoError(b, err) // Write series. @@ -337,7 +337,7 @@ func BenchmarkLoadWAL(b *testing.B) { // While appending the samples to the head it concurrently queries them from multiple go routines and verifies that the // returned results are correct. func TestHead_HighConcurrencyReadAndWrite(t *testing.T) { - head, _ := newTestHead(t, DefaultBlockDuration, false, false) + head, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) defer func() { require.NoError(t, head.Close()) }() @@ -527,8 +527,8 @@ func TestHead_HighConcurrencyReadAndWrite(t *testing.T) { } func TestHead_ReadWAL(t *testing.T) { - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + for _, compress := range []wlog.CompressionType{wlog.CompressionNone, wlog.CompressionSnappy, wlog.CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { entries := []interface{}{ []record.RefSeries{ {Ref: 10, Labels: labels.FromStrings("a", "1")}, @@ -609,7 +609,7 @@ func TestHead_ReadWAL(t *testing.T) { } func TestHead_WALMultiRef(t *testing.T) { - head, w := newTestHead(t, 1000, false, false) + head, w := newTestHead(t, 1000, wlog.CompressionNone, false) require.NoError(t, head.Init(0)) @@ -644,7 +644,7 @@ func TestHead_WALMultiRef(t *testing.T) { require.NotEqual(t, ref1, ref2, "Refs are the same") require.NoError(t, head.Close()) - w, err = wlog.New(nil, nil, w.Dir(), false) + w, err = wlog.New(nil, nil, w.Dir(), wlog.CompressionNone) require.NoError(t, err) opts := DefaultHeadOptions() @@ -669,7 +669,7 @@ func TestHead_WALMultiRef(t *testing.T) { } func TestHead_ActiveAppenders(t *testing.T) { - head, _ := newTestHead(t, 1000, false, false) + head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer head.Close() require.NoError(t, head.Init(0)) @@ -702,14 +702,14 @@ func 
TestHead_ActiveAppenders(t *testing.T) { } func TestHead_UnknownWALRecord(t *testing.T) { - head, w := newTestHead(t, 1000, false, false) + head, w := newTestHead(t, 1000, wlog.CompressionNone, false) w.Log([]byte{255, 42}) require.NoError(t, head.Init(0)) require.NoError(t, head.Close()) } func TestHead_Truncate(t *testing.T) { - h, _ := newTestHead(t, 1000, false, false) + h, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, h.Close()) }() @@ -847,8 +847,8 @@ func TestMemSeries_truncateChunks(t *testing.T) { } func TestHeadDeleteSeriesWithoutSamples(t *testing.T) { - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + for _, compress := range []wlog.CompressionType{wlog.CompressionNone, wlog.CompressionSnappy, wlog.CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { entries := []interface{}{ []record.RefSeries{ {Ref: 10, Labels: labels.FromStrings("a", "1")}, @@ -927,8 +927,8 @@ func TestHeadDeleteSimple(t *testing.T) { }, } - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + for _, compress := range []wlog.CompressionType{wlog.CompressionNone, wlog.CompressionSnappy, wlog.CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { for _, c := range cases { head, w := newTestHead(t, 1000, compress, false) require.NoError(t, head.Init(0)) @@ -1011,7 +1011,7 @@ func TestHeadDeleteSimple(t *testing.T) { } func TestDeleteUntilCurMax(t *testing.T) { - hb, _ := newTestHead(t, 1000000, false, false) + hb, _ := newTestHead(t, 1000000, wlog.CompressionNone, false) defer func() { require.NoError(t, hb.Close()) }() @@ -1064,7 +1064,7 @@ func TestDeletedSamplesAndSeriesStillInWALAfterCheckpoint(t *testing.T) { numSamples := 10000 // Enough samples to cause a checkpoint. 
- hb, w := newTestHead(t, int64(numSamples)*10, false, false) + hb, w := newTestHead(t, int64(numSamples)*10, wlog.CompressionNone, false) for i := 0; i < numSamples; i++ { app := hb.Appender(context.Background()) @@ -1156,7 +1156,7 @@ func TestDelete_e2e(t *testing.T) { seriesMap[labels.New(l...).String()] = []tsdbutil.Sample{} } - hb, _ := newTestHead(t, 100000, false, false) + hb, _ := newTestHead(t, 100000, wlog.CompressionNone, false) defer func() { require.NoError(t, hb.Close()) }() @@ -1506,7 +1506,7 @@ func TestMemSeries_append_atVariableRate(t *testing.T) { func TestGCChunkAccess(t *testing.T) { // Put a chunk, select it. GC it and then access it. const chunkRange = 1000 - h, _ := newTestHead(t, chunkRange, false, false) + h, _ := newTestHead(t, chunkRange, wlog.CompressionNone, false) defer func() { require.NoError(t, h.Close()) }() @@ -1565,7 +1565,7 @@ func TestGCChunkAccess(t *testing.T) { func TestGCSeriesAccess(t *testing.T) { // Put a series, select it. GC it and then access it. 
const chunkRange = 1000 - h, _ := newTestHead(t, chunkRange, false, false) + h, _ := newTestHead(t, chunkRange, wlog.CompressionNone, false) defer func() { require.NoError(t, h.Close()) }() @@ -1624,7 +1624,7 @@ func TestGCSeriesAccess(t *testing.T) { } func TestUncommittedSamplesNotLostOnTruncate(t *testing.T) { - h, _ := newTestHead(t, 1000, false, false) + h, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, h.Close()) }() @@ -1654,7 +1654,7 @@ func TestUncommittedSamplesNotLostOnTruncate(t *testing.T) { } func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) { - h, _ := newTestHead(t, 1000, false, false) + h, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, h.Close()) }() @@ -1685,8 +1685,8 @@ func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) { } func TestHead_LogRollback(t *testing.T) { - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + for _, compress := range []wlog.CompressionType{wlog.CompressionNone, wlog.CompressionSnappy, wlog.CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { h, w := newTestHead(t, 1000, compress, false) defer func() { require.NoError(t, h.Close()) @@ -1743,8 +1743,8 @@ func TestWalRepair_DecodingError(t *testing.T) { 5, }, } { - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("%s,compress=%t", name, compress), func(t *testing.T) { + for _, compress := range []wlog.CompressionType{wlog.CompressionNone, wlog.CompressionSnappy, wlog.CompressionZstd} { + t.Run(fmt.Sprintf("%s,compress=%s", name, compress), func(t *testing.T) { dir := t.TempDir() // Fill the wal and corrupt it. @@ -1812,7 +1812,7 @@ func TestHeadReadWriterRepair(t *testing.T) { walDir := filepath.Join(dir, "wal") // Fill the chunk segments and corrupt it. 
{ - w, err := wlog.New(nil, nil, walDir, false) + w, err := wlog.New(nil, nil, walDir, wlog.CompressionNone) require.NoError(t, err) opts := DefaultHeadOptions() @@ -1880,7 +1880,7 @@ func TestHeadReadWriterRepair(t *testing.T) { } func TestNewWalSegmentOnTruncate(t *testing.T) { - h, wal := newTestHead(t, 1000, false, false) + h, wal := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, h.Close()) }() @@ -1910,7 +1910,7 @@ func TestNewWalSegmentOnTruncate(t *testing.T) { } func TestAddDuplicateLabelName(t *testing.T) { - h, _ := newTestHead(t, 1000, false, false) + h, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, h.Close()) }() @@ -1993,7 +1993,7 @@ func TestMemSeriesIsolation(t *testing.T) { } // Test isolation without restart of Head. - hb, _ := newTestHead(t, 1000, false, false) + hb, _ := newTestHead(t, 1000, wlog.CompressionNone, false) i := addSamples(hb) testIsolation(hb, i) @@ -2055,11 +2055,11 @@ func TestMemSeriesIsolation(t *testing.T) { require.NoError(t, hb.Close()) // Test isolation with restart of Head. This is to verify the num samples of chunks after m-map chunk replay. - hb, w := newTestHead(t, 1000, false, false) + hb, w := newTestHead(t, 1000, wlog.CompressionNone, false) i = addSamples(hb) require.NoError(t, hb.Close()) - wal, err := wlog.NewSize(nil, nil, w.Dir(), 32768, false) + wal, err := wlog.NewSize(nil, nil, w.Dir(), 32768, wlog.CompressionNone) require.NoError(t, err) opts := DefaultHeadOptions() opts.ChunkRange = 1000 @@ -2108,7 +2108,7 @@ func TestIsolationRollback(t *testing.T) { } // Rollback after a failed append and test if the low watermark has progressed anyway. 
- hb, _ := newTestHead(t, 1000, false, false) + hb, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, hb.Close()) }() @@ -2139,7 +2139,7 @@ func TestIsolationLowWatermarkMonotonous(t *testing.T) { t.Skip("skipping test since tsdb isolation is disabled") } - hb, _ := newTestHead(t, 1000, false, false) + hb, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, hb.Close()) }() @@ -2176,7 +2176,7 @@ func TestIsolationAppendIDZeroIsNoop(t *testing.T) { t.Skip("skipping test since tsdb isolation is disabled") } - h, _ := newTestHead(t, 1000, false, false) + h, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, h.Close()) }() @@ -2207,7 +2207,7 @@ func TestIsolationWithoutAdd(t *testing.T) { t.Skip("skipping test since tsdb isolation is disabled") } - hb, _ := newTestHead(t, 1000, false, false) + hb, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, hb.Close()) }() @@ -2302,7 +2302,7 @@ func TestOutOfOrderSamplesMetric(t *testing.T) { } func testHeadSeriesChunkRace(t *testing.T) { - h, _ := newTestHead(t, 1000, false, false) + h, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, h.Close()) }() @@ -2337,7 +2337,7 @@ func testHeadSeriesChunkRace(t *testing.T) { } func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) { - head, _ := newTestHead(t, 1000, false, false) + head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, head.Close()) }() @@ -2397,7 +2397,7 @@ func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) { } func TestHeadLabelValuesWithMatchers(t *testing.T) { - head, _ := newTestHead(t, 1000, false, false) + head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) t.Cleanup(func() { require.NoError(t, head.Close()) }) app := head.Appender(context.Background()) @@ -2456,7 +2456,7 @@ func TestHeadLabelValuesWithMatchers(t 
*testing.T) { } func TestHeadLabelNamesWithMatchers(t *testing.T) { - head, _ := newTestHead(t, 1000, false, false) + head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, head.Close()) }() @@ -2524,7 +2524,7 @@ func TestHeadLabelNamesWithMatchers(t *testing.T) { } func TestErrReuseAppender(t *testing.T) { - head, _ := newTestHead(t, 1000, false, false) + head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, head.Close()) }() @@ -2560,7 +2560,7 @@ func TestErrReuseAppender(t *testing.T) { func TestHeadMintAfterTruncation(t *testing.T) { chunkRange := int64(2000) - head, _ := newTestHead(t, chunkRange, false, false) + head, _ := newTestHead(t, chunkRange, wlog.CompressionNone, false) app := head.Appender(context.Background()) _, err := app.Append(0, labels.FromStrings("a", "b"), 100, 100) @@ -2594,7 +2594,7 @@ func TestHeadMintAfterTruncation(t *testing.T) { func TestHeadExemplars(t *testing.T) { chunkRange := int64(2000) - head, _ := newTestHead(t, chunkRange, false, false) + head, _ := newTestHead(t, chunkRange, wlog.CompressionNone, false) app := head.Appender(context.Background()) l := labels.FromStrings("traceId", "123") @@ -2616,7 +2616,7 @@ func TestHeadExemplars(t *testing.T) { func BenchmarkHeadLabelValuesWithMatchers(b *testing.B) { chunkRange := int64(2000) - head, _ := newTestHead(b, chunkRange, false, false) + head, _ := newTestHead(b, chunkRange, wlog.CompressionNone, false) b.Cleanup(func() { require.NoError(b, head.Close()) }) app := head.Appender(context.Background()) @@ -2930,7 +2930,7 @@ func TestAppendHistogram(t *testing.T) { l := labels.FromStrings("a", "b") for _, numHistograms := range []int{1, 10, 150, 200, 250, 300} { t.Run(fmt.Sprintf("%d", numHistograms), func(t *testing.T) { - head, _ := newTestHead(t, 1000, false, false) + head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) t.Cleanup(func() { require.NoError(t, head.Close()) }) @@ -3034,7 +3034,7 
@@ func TestAppendHistogram(t *testing.T) { } func TestHistogramInWALAndMmapChunk(t *testing.T) { - head, _ := newTestHead(t, 3000, false, false) + head, _ := newTestHead(t, 3000, wlog.CompressionNone, false) t.Cleanup(func() { require.NoError(t, head.Close()) }) @@ -3188,7 +3188,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { // Restart head. require.NoError(t, head.Close()) startHead := func() { - w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false) + w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone) require.NoError(t, err) head, err = NewHead(nil, nil, w, nil, head.opts, nil) require.NoError(t, err) @@ -3217,7 +3217,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { } func TestChunkSnapshot(t *testing.T) { - head, _ := newTestHead(t, 120*4, false, false) + head, _ := newTestHead(t, 120*4, wlog.CompressionNone, false) defer func() { head.opts.EnableMemorySnapshotOnShutdown = false require.NoError(t, head.Close()) @@ -3310,7 +3310,7 @@ func TestChunkSnapshot(t *testing.T) { } openHeadAndCheckReplay := func() { - w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false) + w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone) require.NoError(t, err) head, err = NewHead(nil, nil, w, nil, head.opts, nil) require.NoError(t, err) @@ -3505,7 +3505,7 @@ func TestChunkSnapshot(t *testing.T) { } func TestSnapshotError(t *testing.T) { - head, _ := newTestHead(t, 120*4, false, false) + head, _ := newTestHead(t, 120*4, wlog.CompressionNone, false) defer func() { head.opts.EnableMemorySnapshotOnShutdown = false require.NoError(t, head.Close()) @@ -3562,7 +3562,7 @@ func TestSnapshotError(t *testing.T) { require.NoError(t, f.Close()) // Create new Head which should replay this snapshot. 
- w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false) + w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone) require.NoError(t, err) // Testing https://github.com/prometheus/prometheus/issues/9437 with the registry. head, err = NewHead(prometheus.NewRegistry(), nil, w, nil, head.opts, nil) @@ -3579,7 +3579,7 @@ func TestSnapshotError(t *testing.T) { func TestHistogramMetrics(t *testing.T) { numHistograms := 10 - head, _ := newTestHead(t, 1000, false, false) + head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) t.Cleanup(func() { require.NoError(t, head.Close()) }) @@ -3609,7 +3609,7 @@ func TestHistogramMetrics(t *testing.T) { require.Equal(t, float64(expHSamples), prom_testutil.ToFloat64(head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram))) require.NoError(t, head.Close()) - w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false) + w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone) require.NoError(t, err) head, err = NewHead(nil, nil, w, nil, head.opts, nil) require.NoError(t, err) @@ -3631,7 +3631,7 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) { t.Helper() l := labels.FromStrings("a", "b") numHistograms := 20 - head, _ := newTestHead(t, 100000, false, false) + head, _ := newTestHead(t, 100000, wlog.CompressionNone, false) t.Cleanup(func() { require.NoError(t, head.Close()) }) @@ -3778,7 +3778,7 @@ func TestHistogramCounterResetHeader(t *testing.T) { for _, floatHisto := range []bool{true, false} { t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) { l := labels.FromStrings("a", "b") - head, _ := newTestHead(t, 1000, false, false) + head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) t.Cleanup(func() { require.NoError(t, head.Close()) }) @@ -4041,7 +4041,7 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) { // Tests https://github.com/prometheus/prometheus/issues/9725. 
func TestChunkSnapshotReplayBug(t *testing.T) { dir := t.TempDir() - wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, true) + wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy) require.NoError(t, err) // Write few series records and samples such that the series references are not in order in the WAL @@ -4108,7 +4108,7 @@ func TestChunkSnapshotReplayBug(t *testing.T) { func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) { dir := t.TempDir() - wlTemp, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, true) + wlTemp, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy) require.NoError(t, err) // Write a snapshot with .tmp suffix. This used to fail taking any further snapshots or replay of snapshots. @@ -4146,9 +4146,9 @@ func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) { // TODO(codesome): Needs test for ooo WAL repair. func TestOOOWalReplay(t *testing.T) { dir := t.TempDir() - wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, true) + wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy) require.NoError(t, err) - oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, true) + oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, wlog.CompressionSnappy) require.NoError(t, err) opts := DefaultHeadOptions() @@ -4193,9 +4193,9 @@ func TestOOOWalReplay(t *testing.T) { // Restart head. 
require.NoError(t, h.Close()) - wal, err = wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, true) + wal, err = wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy) require.NoError(t, err) - oooWlog, err = wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, true) + oooWlog, err = wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, wlog.CompressionSnappy) require.NoError(t, err) h, err = NewHead(nil, nil, wal, oooWlog, opts, nil) require.NoError(t, err) @@ -4230,9 +4230,9 @@ func TestOOOWalReplay(t *testing.T) { // TestOOOMmapReplay checks the replay at a low level. func TestOOOMmapReplay(t *testing.T) { dir := t.TempDir() - wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, true) + wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy) require.NoError(t, err) - oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, true) + oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, wlog.CompressionSnappy) require.NoError(t, err) opts := DefaultHeadOptions() @@ -4281,9 +4281,9 @@ func TestOOOMmapReplay(t *testing.T) { // Restart head. 
require.NoError(t, h.Close()) - wal, err = wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, true) + wal, err = wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy) require.NoError(t, err) - oooWlog, err = wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, true) + oooWlog, err = wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, wlog.CompressionSnappy) require.NoError(t, err) h, err = NewHead(nil, nil, wal, oooWlog, opts, nil) require.NoError(t, err) @@ -4312,7 +4312,7 @@ func TestOOOMmapReplay(t *testing.T) { } func TestHeadInit_DiscardChunksWithUnsupportedEncoding(t *testing.T) { - h, _ := newTestHead(t, 1000, false, false) + h, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { require.NoError(t, h.Close()) }() @@ -4355,7 +4355,7 @@ func TestHeadInit_DiscardChunksWithUnsupportedEncoding(t *testing.T) { require.NoError(t, h.Close()) - wal, err := wlog.NewSize(nil, nil, filepath.Join(h.opts.ChunkDirRoot, "wal"), 32768, false) + wal, err := wlog.NewSize(nil, nil, filepath.Join(h.opts.ChunkDirRoot, "wal"), 32768, wlog.CompressionNone) require.NoError(t, err) h, err = NewHead(nil, nil, wal, nil, h.opts, nil) require.NoError(t, err) @@ -4390,7 +4390,7 @@ func (c *unsupportedChunk) Encoding() chunkenc.Encoding { // Tests https://github.com/prometheus/prometheus/issues/10277. 
func TestMmapPanicAfterMmapReplayCorruption(t *testing.T) { dir := t.TempDir() - wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, false) + wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionNone) require.NoError(t, err) opts := DefaultHeadOptions() @@ -4423,7 +4423,7 @@ func TestMmapPanicAfterMmapReplayCorruption(t *testing.T) { addChunks() require.NoError(t, h.Close()) - wal, err = wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, false) + wal, err = wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionNone) require.NoError(t, err) mmapFilePath := filepath.Join(dir, "chunks_head", "000001") @@ -4449,7 +4449,7 @@ func TestReplayAfterMmapReplayError(t *testing.T) { var err error openHead := func() { - wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, false) + wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionNone) require.NoError(t, err) opts := DefaultHeadOptions() @@ -4667,9 +4667,9 @@ func generateBigTestHistograms(n int) []*histogram.Histogram { func TestOOOAppendWithNoSeries(t *testing.T) { dir := t.TempDir() - wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, true) + wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy) require.NoError(t, err) - oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, true) + oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, wlog.CompressionSnappy) require.NoError(t, err) opts := DefaultHeadOptions() @@ -4748,9 +4748,9 @@ func TestOOOAppendWithNoSeries(t *testing.T) { func TestHeadMinOOOTimeUpdate(t *testing.T) { dir := t.TempDir() - wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, true) + wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy) require.NoError(t, err) - oooWlog, err := wlog.NewSize(nil, nil, 
filepath.Join(dir, wlog.WblDirName), 32768, true) + oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, wlog.CompressionSnappy) require.NoError(t, err) opts := DefaultHeadOptions() @@ -4795,7 +4795,7 @@ func TestHeadMinOOOTimeUpdate(t *testing.T) { func TestGaugeHistogramWALAndChunkHeader(t *testing.T) { l := labels.FromStrings("a", "b") - head, _ := newTestHead(t, 1000, false, false) + head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) t.Cleanup(func() { require.NoError(t, head.Close()) }) @@ -4859,7 +4859,7 @@ func TestGaugeHistogramWALAndChunkHeader(t *testing.T) { require.NoError(t, head.Close()) require.NoError(t, os.RemoveAll(mmappedChunksDir(head.opts.ChunkDirRoot))) - w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false) + w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone) require.NoError(t, err) head, err = NewHead(nil, nil, w, nil, head.opts, nil) require.NoError(t, err) @@ -4870,7 +4870,7 @@ func TestGaugeHistogramWALAndChunkHeader(t *testing.T) { func TestGaugeFloatHistogramWALAndChunkHeader(t *testing.T) { l := labels.FromStrings("a", "b") - head, _ := newTestHead(t, 1000, false, false) + head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) t.Cleanup(func() { require.NoError(t, head.Close()) }) @@ -4934,7 +4934,7 @@ func TestGaugeFloatHistogramWALAndChunkHeader(t *testing.T) { require.NoError(t, head.Close()) require.NoError(t, os.RemoveAll(mmappedChunksDir(head.opts.ChunkDirRoot))) - w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false) + w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone) require.NoError(t, err) head, err = NewHead(nil, nil, w, nil, head.opts, nil) require.NoError(t, err) @@ -4944,7 +4944,7 @@ func TestGaugeFloatHistogramWALAndChunkHeader(t *testing.T) { } func TestSnapshotAheadOfWALError(t *testing.T) { - head, _ := newTestHead(t, 120*4, false, false) + head, _ := newTestHead(t, 120*4, wlog.CompressionNone, false) 
head.opts.EnableMemorySnapshotOnShutdown = true // Add a sample to fill WAL. app := head.Appender(context.Background()) @@ -4967,7 +4967,7 @@ func TestSnapshotAheadOfWALError(t *testing.T) { // to keep using the same snapshot directory instead of a random one. require.NoError(t, os.RemoveAll(head.wal.Dir())) head.opts.EnableMemorySnapshotOnShutdown = false - w, _ := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false) + w, _ := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone) head, err = NewHead(nil, nil, w, nil, head.opts, nil) require.NoError(t, err) // Add a sample to fill WAL. @@ -4986,7 +4986,7 @@ func TestSnapshotAheadOfWALError(t *testing.T) { // Create new Head which should detect the incorrect index and delete the snapshot. head.opts.EnableMemorySnapshotOnShutdown = true - w, _ = wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false) + w, _ = wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone) head, err = NewHead(nil, nil, w, nil, head.opts, nil) require.NoError(t, err) require.NoError(t, head.Init(math.MinInt64)) diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 2fe33befb..2397a9ec9 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -1119,7 +1119,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { if err := os.MkdirAll(cpdirtmp, 0o777); err != nil { return stats, errors.Wrap(err, "create chunk snapshot dir") } - cp, err := wlog.New(nil, nil, cpdirtmp, h.wal.CompressionEnabled()) + cp, err := wlog.New(nil, nil, cpdirtmp, h.wal.CompressionType()) if err != nil { return stats, errors.Wrap(err, "open chunk snapshot") } diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index ed9ca2769..6c0038f89 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -29,6 +29,7 @@ import ( "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/tsdbutil" + "github.com/prometheus/prometheus/tsdb/wlog" ) 
type chunkInterval struct { @@ -295,7 +296,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { for perm, intervals := range permutations { for _, headChunk := range []bool{false, true} { t.Run(fmt.Sprintf("name=%s, permutation=%d, headChunk=%t", tc.name, perm, headChunk), func(t *testing.T) { - h, _ := newTestHead(t, 1000, false, true) + h, _ := newTestHead(t, 1000, wlog.CompressionNone, true) defer func() { require.NoError(t, h.Close()) }() @@ -375,7 +376,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { func TestOOOHeadChunkReader_LabelValues(t *testing.T) { chunkRange := int64(2000) - head, _ := newTestHead(t, chunkRange, false, true) + head, _ := newTestHead(t, chunkRange, wlog.CompressionNone, true) t.Cleanup(func() { require.NoError(t, head.Close()) }) app := head.Appender(context.Background()) diff --git a/tsdb/wal.go b/tsdb/wal.go index 70378021a..3a410fb63 100644 --- a/tsdb/wal.go +++ b/tsdb/wal.go @@ -1226,7 +1226,7 @@ func MigrateWAL(logger log.Logger, dir string) (err error) { if err := os.RemoveAll(tmpdir); err != nil { return errors.Wrap(err, "cleanup replacement dir") } - repl, err := wlog.New(logger, nil, tmpdir, false) + repl, err := wlog.New(logger, nil, tmpdir, wlog.CompressionNone) if err != nil { return errors.Wrap(err, "open new WAL") } diff --git a/tsdb/wal_test.go b/tsdb/wal_test.go index da242b875..5b2911131 100644 --- a/tsdb/wal_test.go +++ b/tsdb/wal_test.go @@ -450,7 +450,7 @@ func TestMigrateWAL_Empty(t *testing.T) { wdir := path.Join(dir, "wal") // Initialize empty WAL. - w, err := wlog.New(nil, nil, wdir, false) + w, err := wlog.New(nil, nil, wdir, wlog.CompressionNone) require.NoError(t, err) require.NoError(t, w.Close()) @@ -493,7 +493,7 @@ func TestMigrateWAL_Fuzz(t *testing.T) { // Perform migration. require.NoError(t, MigrateWAL(nil, wdir)) - w, err := wlog.New(nil, nil, wdir, false) + w, err := wlog.New(nil, nil, wdir, wlog.CompressionNone) require.NoError(t, err) // We can properly write some new data after migration. 
diff --git a/tsdb/wlog/checkpoint.go b/tsdb/wlog/checkpoint.go index fe9952a30..dd52ea2e3 100644 --- a/tsdb/wlog/checkpoint.go +++ b/tsdb/wlog/checkpoint.go @@ -134,7 +134,7 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head if err := os.MkdirAll(cpdirtmp, 0o777); err != nil { return nil, errors.Wrap(err, "create checkpoint dir") } - cp, err := New(nil, nil, cpdirtmp, w.CompressionEnabled()) + cp, err := New(nil, nil, cpdirtmp, w.CompressionType()) if err != nil { return nil, errors.Wrap(err, "open checkpoint") } diff --git a/tsdb/wlog/checkpoint_test.go b/tsdb/wlog/checkpoint_test.go index 22f577efd..704a65cc1 100644 --- a/tsdb/wlog/checkpoint_test.go +++ b/tsdb/wlog/checkpoint_test.go @@ -126,8 +126,8 @@ func TestCheckpoint(t *testing.T) { } } - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { dir := t.TempDir() var enc record.Encoder @@ -303,7 +303,7 @@ func TestCheckpoint(t *testing.T) { func TestCheckpointNoTmpFolderAfterError(t *testing.T) { // Create a new wlog with invalid data. 
dir := t.TempDir() - w, err := NewSize(nil, nil, dir, 64*1024, false) + w, err := NewSize(nil, nil, dir, 64*1024, CompressionNone) require.NoError(t, err) var enc record.Encoder require.NoError(t, w.Log(enc.Series([]record.RefSeries{ diff --git a/tsdb/wlog/live_reader.go b/tsdb/wlog/live_reader.go index 0ca69093a..c69017051 100644 --- a/tsdb/wlog/live_reader.go +++ b/tsdb/wlog/live_reader.go @@ -23,6 +23,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/golang/snappy" + "github.com/klauspost/compress/zstd" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" ) @@ -51,10 +52,14 @@ func NewLiveReaderMetrics(reg prometheus.Registerer) *LiveReaderMetrics { // NewLiveReader returns a new live reader. func NewLiveReader(logger log.Logger, metrics *LiveReaderMetrics, r io.Reader) *LiveReader { + // Calling zstd.NewReader with a nil io.Reader and no options cannot return an error. + zstdReader, _ := zstd.NewReader(nil) + lr := &LiveReader{ - logger: logger, - rdr: r, - metrics: metrics, + logger: logger, + rdr: r, + zstdReader: zstdReader, + metrics: metrics, // Until we understand how they come about, make readers permissive // to records spanning pages. @@ -68,17 +73,18 @@ func NewLiveReader(logger log.Logger, metrics *LiveReaderMetrics, r io.Reader) * // that are still in the process of being written, and returns records as soon // as they can be read. type LiveReader struct { - logger log.Logger - rdr io.Reader - err error - rec []byte - snappyBuf []byte - hdr [recordHeaderSize]byte - buf [pageSize]byte - readIndex int // Index in buf to start at for next read. - writeIndex int // Index in buf to start at for next write. - total int64 // Total bytes processed during reading in calls to Next(). - index int // Used to track partial records, should be 0 at the start of every new record. 
+ logger log.Logger + rdr io.Reader + err error + rec []byte + compressBuf []byte + zstdReader *zstd.Decoder + hdr [recordHeaderSize]byte + buf [pageSize]byte + readIndex int // Index in buf to start at for next read. + writeIndex int // Index in buf to start at for next write. + total int64 // Total bytes processed during reading in calls to Next(). + index int // Used to track partial records, should be 0 at the start of every new record. // For testing, we can treat EOF as a non-error. eofNonErr bool @@ -191,12 +197,14 @@ func (r *LiveReader) buildRecord() (bool, error) { rt := recTypeFromHeader(r.hdr[0]) if rt == recFirst || rt == recFull { r.rec = r.rec[:0] - r.snappyBuf = r.snappyBuf[:0] + r.compressBuf = r.compressBuf[:0] } - compressed := r.hdr[0]&snappyMask != 0 - if compressed { - r.snappyBuf = append(r.snappyBuf, temp...) + isSnappyCompressed := r.hdr[0]&snappyMask == snappyMask + isZstdCompressed := r.hdr[0]&zstdMask == zstdMask + + if isSnappyCompressed || isZstdCompressed { + r.compressBuf = append(r.compressBuf, temp...) } else { r.rec = append(r.rec, temp...) } @@ -207,12 +215,17 @@ func (r *LiveReader) buildRecord() (bool, error) { } if rt == recLast || rt == recFull { r.index = 0 - if compressed && len(r.snappyBuf) > 0 { + if isSnappyCompressed && len(r.compressBuf) > 0 { // The snappy library uses `len` to calculate if we need a new buffer. // In order to allocate as few buffers as possible make the length // equal to the capacity. 
r.rec = r.rec[:cap(r.rec)] - r.rec, err = snappy.Decode(r.rec, r.snappyBuf) + r.rec, err = snappy.Decode(r.rec, r.compressBuf) + if err != nil { + return false, err + } + } else if isZstdCompressed && len(r.compressBuf) > 0 { + r.rec, err = r.zstdReader.DecodeAll(r.compressBuf, r.rec[:0]) if err != nil { return false, err } diff --git a/tsdb/wlog/reader.go b/tsdb/wlog/reader.go index cba216764..f77b03b8e 100644 --- a/tsdb/wlog/reader.go +++ b/tsdb/wlog/reader.go @@ -20,23 +20,27 @@ import ( "io" "github.com/golang/snappy" + "github.com/klauspost/compress/zstd" "github.com/pkg/errors" ) // Reader reads WAL records from an io.Reader. type Reader struct { - rdr io.Reader - err error - rec []byte - snappyBuf []byte - buf [pageSize]byte - total int64 // Total bytes processed. - curRecTyp recType // Used for checking that the last record is not torn. + rdr io.Reader + err error + rec []byte + compressBuf []byte + zstdReader *zstd.Decoder + buf [pageSize]byte + total int64 // Total bytes processed. + curRecTyp recType // Used for checking that the last record is not torn. } // NewReader returns a new reader. func NewReader(r io.Reader) *Reader { - return &Reader{rdr: r} + // Calling zstd.NewReader with a nil io.Reader and no options cannot return an error. + zstdReader, _ := zstd.NewReader(nil) + return &Reader{rdr: r, zstdReader: zstdReader} } // Next advances the reader to the next records and returns true if it exists. @@ -63,7 +67,7 @@ func (r *Reader) next() (err error) { buf := r.buf[recordHeaderSize:] r.rec = r.rec[:0] - r.snappyBuf = r.snappyBuf[:0] + r.compressBuf = r.compressBuf[:0] i := 0 for { @@ -72,7 +76,8 @@ func (r *Reader) next() (err error) { } r.total++ r.curRecTyp = recTypeFromHeader(hdr[0]) - compressed := hdr[0]&snappyMask != 0 + isSnappyCompressed := hdr[0]&snappyMask == snappyMask + isZstdCompressed := hdr[0]&zstdMask == zstdMask // Gobble up zero bytes. 
if r.curRecTyp == recPageTerm { @@ -128,8 +133,8 @@ func (r *Reader) next() (err error) { return errors.Errorf("unexpected checksum %x, expected %x", c, crc) } - if compressed { - r.snappyBuf = append(r.snappyBuf, buf[:length]...) + if isSnappyCompressed || isZstdCompressed { + r.compressBuf = append(r.compressBuf, buf[:length]...) } else { r.rec = append(r.rec, buf[:length]...) } @@ -138,12 +143,15 @@ func (r *Reader) next() (err error) { return err } if r.curRecTyp == recLast || r.curRecTyp == recFull { - if compressed && len(r.snappyBuf) > 0 { + if isSnappyCompressed && len(r.compressBuf) > 0 { // The snappy library uses `len` to calculate if we need a new buffer. // In order to allocate as few buffers as possible make the length // equal to the capacity. r.rec = r.rec[:cap(r.rec)] - r.rec, err = snappy.Decode(r.rec, r.snappyBuf) + r.rec, err = snappy.Decode(r.rec, r.compressBuf) + return err + } else if isZstdCompressed && len(r.compressBuf) > 0 { + r.rec, err = r.zstdReader.DecodeAll(r.compressBuf, r.rec[:0]) return err } return nil diff --git a/tsdb/wlog/reader_test.go b/tsdb/wlog/reader_test.go index 737520e76..2c4dd622c 100644 --- a/tsdb/wlog/reader_test.go +++ b/tsdb/wlog/reader_test.go @@ -310,8 +310,8 @@ func allSegments(dir string) (io.ReadCloser, error) { func TestReaderFuzz(t *testing.T) { for name, fn := range readerConstructors { - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("%s,compress=%t", name, compress), func(t *testing.T) { + for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { + t.Run(fmt.Sprintf("%s,compress=%s", name, compress), func(t *testing.T) { dir := t.TempDir() w, err := NewSize(nil, nil, dir, 128*pageSize, compress) @@ -349,8 +349,8 @@ func TestReaderFuzz(t *testing.T) { func TestReaderFuzz_Live(t *testing.T) { logger := testutil.NewLogger(t) - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + for _, 
compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { dir := t.TempDir() w, err := NewSize(nil, nil, dir, 128*pageSize, compress) @@ -439,7 +439,7 @@ func TestLiveReaderCorrupt_ShortFile(t *testing.T) { logger := testutil.NewLogger(t) dir := t.TempDir() - w, err := NewSize(nil, nil, dir, pageSize, false) + w, err := NewSize(nil, nil, dir, pageSize, CompressionNone) require.NoError(t, err) rec := make([]byte, pageSize-recordHeaderSize) @@ -479,7 +479,7 @@ func TestLiveReaderCorrupt_RecordTooLongAndShort(t *testing.T) { logger := testutil.NewLogger(t) dir := t.TempDir() - w, err := NewSize(nil, nil, dir, pageSize*2, false) + w, err := NewSize(nil, nil, dir, pageSize*2, CompressionNone) require.NoError(t, err) rec := make([]byte, pageSize-recordHeaderSize) @@ -526,7 +526,7 @@ func TestReaderData(t *testing.T) { for name, fn := range readerConstructors { t.Run(name, func(t *testing.T) { - w, err := New(nil, nil, dir, true) + w, err := New(nil, nil, dir, CompressionSnappy) require.NoError(t, err) sr, err := allSegments(dir) diff --git a/tsdb/wlog/watcher_test.go b/tsdb/wlog/watcher_test.go index 94b6a92d1..bc6a10126 100644 --- a/tsdb/wlog/watcher_test.go +++ b/tsdb/wlog/watcher_test.go @@ -122,8 +122,8 @@ func TestTailSamples(t *testing.T) { const samplesCount = 250 const exemplarsCount = 25 const histogramsCount = 50 - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { now := time.Now() dir := t.TempDir() @@ -246,8 +246,8 @@ func TestReadToEndNoCheckpoint(t *testing.T) { const seriesCount = 10 const samplesCount = 250 - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + for _, 
compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") err := os.Mkdir(wdir, 0o777) @@ -314,8 +314,8 @@ func TestReadToEndWithCheckpoint(t *testing.T) { const seriesCount = 10 const samplesCount = 250 - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") @@ -402,8 +402,8 @@ func TestReadCheckpoint(t *testing.T) { const seriesCount = 10 const samplesCount = 250 - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") @@ -475,8 +475,8 @@ func TestReadCheckpointMultipleSegments(t *testing.T) { const seriesCount = 20 const samplesCount = 300 - for _, compress := range []bool{false, true} { - t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { + for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") @@ -546,15 +546,15 @@ func TestCheckpointSeriesReset(t *testing.T) { const seriesCount = 20 const samplesCount = 350 testCases := []struct { - compress bool + compress CompressionType segments int }{ - {compress: false, segments: 14}, - {compress: true, segments: 13}, + {compress: CompressionNone, segments: 14}, + {compress: CompressionSnappy, segments: 13}, } for _, tc := range testCases { - 
t.Run(fmt.Sprintf("compress=%t", tc.compress), func(t *testing.T) { + t.Run(fmt.Sprintf("compress=%s", tc.compress), func(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") diff --git a/tsdb/wlog/wlog.go b/tsdb/wlog/wlog.go index b7b1623f9..d898ebd7a 100644 --- a/tsdb/wlog/wlog.go +++ b/tsdb/wlog/wlog.go @@ -29,6 +29,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/golang/snappy" + "github.com/klauspost/compress/zstd" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "golang.org/x/exp/slices" @@ -164,6 +165,26 @@ func OpenReadSegment(fn string) (*Segment, error) { return &Segment{SegmentFile: f, i: k, dir: filepath.Dir(fn)}, nil } +type CompressionType string + +const ( + CompressionNone CompressionType = "none" + CompressionSnappy CompressionType = "snappy" + CompressionZstd CompressionType = "zstd" +) + +// ParseCompressionType parses the two compression-related configuration values and returns the CompressionType. If +// compression is enabled but the compressType is unrecognized, we default to Snappy compression. +func ParseCompressionType(compress bool, compressType string) CompressionType { + if compress { + if compressType == "zstd" { + return CompressionZstd + } + return CompressionSnappy + } + return CompressionNone +} + // WL is a write log that stores records in segment files. // It must be read from start to end once before logging new data. // If an error occurs during read, the repair procedure must be called @@ -185,8 +206,9 @@ type WL struct { stopc chan chan struct{} actorc chan func() closed bool // To allow calling Close() more than once without blocking. - compress bool - snappyBuf []byte + compress CompressionType + compressBuf []byte + zstdWriter *zstd.Encoder WriteNotified WriteNotified @@ -265,13 +287,13 @@ func newWLMetrics(w *WL, r prometheus.Registerer) *wlMetrics { } // New returns a new WAL over the given directory. 
-func New(logger log.Logger, reg prometheus.Registerer, dir string, compress bool) (*WL, error) { +func New(logger log.Logger, reg prometheus.Registerer, dir string, compress CompressionType) (*WL, error) { return NewSize(logger, reg, dir, DefaultSegmentSize, compress) } // NewSize returns a new write log over the given directory. // New segments are created with the specified size. -func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress bool) (*WL, error) { +func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress CompressionType) (*WL, error) { if segmentSize%pageSize != 0 { return nil, errors.New("invalid segment size") } @@ -281,6 +303,16 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi if logger == nil { logger = log.NewNopLogger() } + + var zstdWriter *zstd.Encoder + if compress == CompressionZstd { + var err error + zstdWriter, err = zstd.NewWriter(nil) + if err != nil { + return nil, err + } + } + w := &WL{ dir: dir, logger: logger, @@ -289,6 +321,7 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi actorc: make(chan func(), 100), stopc: make(chan chan struct{}), compress: compress, + zstdWriter: zstdWriter, } prefix := "prometheus_tsdb_wal_" if filepath.Base(dir) == WblDirName { @@ -327,16 +360,22 @@ func Open(logger log.Logger, dir string) (*WL, error) { if logger == nil { logger = log.NewNopLogger() } + zstdWriter, err := zstd.NewWriter(nil) + if err != nil { + return nil, err + } + w := &WL{ - dir: dir, - logger: logger, + dir: dir, + logger: logger, + zstdWriter: zstdWriter, } return w, nil } -// CompressionEnabled returns if compression is enabled on this WAL. -func (w *WL) CompressionEnabled() bool { +// CompressionType returns if compression is enabled on this WAL. 
+func (w *WL) CompressionType() CompressionType { return w.compress } @@ -583,9 +622,10 @@ func (w *WL) flushPage(clear bool) error { } // First Byte of header format: -// [ 4 bits unallocated] [1 bit snappy compression flag] [ 3 bit record type ] +// [3 bits unallocated] [1 bit zstd compression flag] [1 bit snappy compression flag] [3 bit record type ] const ( snappyMask = 1 << 3 + zstdMask = 1 << 4 recTypeMask = snappyMask - 1 ) @@ -655,17 +695,23 @@ func (w *WL) log(rec []byte, final bool) error { // Compress the record before calculating if a new segment is needed. compressed := false - if w.compress && - len(rec) > 0 && + if w.compress == CompressionSnappy && len(rec) > 0 { // If MaxEncodedLen is less than 0 the record is too large to be compressed. - snappy.MaxEncodedLen(len(rec)) >= 0 { - // The snappy library uses `len` to calculate if we need a new buffer. - // In order to allocate as few buffers as possible make the length - // equal to the capacity. - w.snappyBuf = w.snappyBuf[:cap(w.snappyBuf)] - w.snappyBuf = snappy.Encode(w.snappyBuf, rec) - if len(w.snappyBuf) < len(rec) { - rec = w.snappyBuf + if len(rec) > 0 && snappy.MaxEncodedLen(len(rec)) >= 0 { + // The snappy library uses `len` to calculate if we need a new buffer. + // In order to allocate as few buffers as possible make the length + // equal to the capacity. 
+ w.compressBuf = w.compressBuf[:cap(w.compressBuf)] + w.compressBuf = snappy.Encode(w.compressBuf, rec) + if len(w.compressBuf) < len(rec) { + rec = w.compressBuf + compressed = true + } + } + } else if w.compress == CompressionZstd && len(rec) > 0 { + w.compressBuf = w.zstdWriter.EncodeAll(rec, w.compressBuf[:0]) + if len(w.compressBuf) < len(rec) { + rec = w.compressBuf compressed = true } } @@ -706,7 +752,11 @@ func (w *WL) log(rec []byte, final bool) error { typ = recMiddle } if compressed { - typ |= snappyMask + if w.compress == CompressionSnappy { + typ |= snappyMask + } else if w.compress == CompressionZstd { + typ |= zstdMask + } } buf[0] = byte(typ) diff --git a/tsdb/wlog/wlog_test.go b/tsdb/wlog/wlog_test.go index 3d208baa3..f9ce225b3 100644 --- a/tsdb/wlog/wlog_test.go +++ b/tsdb/wlog/wlog_test.go @@ -124,7 +124,7 @@ func TestWALRepair_ReadingError(t *testing.T) { // then corrupt a given record in a given segment. // As a result we want a repaired WAL with given intact records. segSize := 3 * pageSize - w, err := NewSize(nil, nil, dir, segSize, false) + w, err := NewSize(nil, nil, dir, segSize, CompressionNone) require.NoError(t, err) var records [][]byte @@ -149,7 +149,7 @@ func TestWALRepair_ReadingError(t *testing.T) { require.NoError(t, f.Close()) - w, err = NewSize(nil, nil, dir, segSize, false) + w, err = NewSize(nil, nil, dir, segSize, CompressionNone) require.NoError(t, err) defer w.Close() @@ -223,7 +223,7 @@ func TestCorruptAndCarryOn(t *testing.T) { // Produce a WAL with a two segments of 3 pages with 3 records each, // so when we truncate the file we're guaranteed to split a record. 
{ - w, err := NewSize(logger, nil, dir, segmentSize, false) + w, err := NewSize(logger, nil, dir, segmentSize, CompressionNone) require.NoError(t, err) for i := 0; i < 18; i++ { @@ -294,7 +294,7 @@ func TestCorruptAndCarryOn(t *testing.T) { err = sr.Close() require.NoError(t, err) - w, err := NewSize(logger, nil, dir, segmentSize, false) + w, err := NewSize(logger, nil, dir, segmentSize, CompressionNone) require.NoError(t, err) err = w.Repair(corruptionErr) @@ -337,7 +337,7 @@ func TestCorruptAndCarryOn(t *testing.T) { // TestClose ensures that calling Close more than once doesn't panic and doesn't block. func TestClose(t *testing.T) { dir := t.TempDir() - w, err := NewSize(nil, nil, dir, pageSize, false) + w, err := NewSize(nil, nil, dir, pageSize, CompressionNone) require.NoError(t, err) require.NoError(t, w.Close()) require.Error(t, w.Close()) @@ -350,7 +350,7 @@ func TestSegmentMetric(t *testing.T) { ) dir := t.TempDir() - w, err := NewSize(nil, nil, dir, segmentSize, false) + w, err := NewSize(nil, nil, dir, segmentSize, CompressionNone) require.NoError(t, err) initialSegment := client_testutil.ToFloat64(w.metrics.currentSegment) @@ -369,7 +369,7 @@ func TestSegmentMetric(t *testing.T) { } func TestCompression(t *testing.T) { - bootstrap := func(compressed bool) string { + bootstrap := func(compressed CompressionType) string { const ( segmentSize = pageSize recordSize = (pageSize / 2) - recordHeaderSize @@ -390,21 +390,27 @@ func TestCompression(t *testing.T) { return dirPath } - dirCompressed := bootstrap(true) + tmpDirs := make([]string, 0, 3) defer func() { - require.NoError(t, os.RemoveAll(dirCompressed)) - }() - dirUnCompressed := bootstrap(false) - defer func() { - require.NoError(t, os.RemoveAll(dirUnCompressed)) + for _, dir := range tmpDirs { + require.NoError(t, os.RemoveAll(dir)) + } }() - uncompressedSize, err := fileutil.DirSize(dirUnCompressed) - require.NoError(t, err) - compressedSize, err := fileutil.DirSize(dirCompressed) - require.NoError(t, 
err) + dirUnCompressed := bootstrap(CompressionNone) + tmpDirs = append(tmpDirs, dirUnCompressed) - require.Greater(t, float64(uncompressedSize)*0.75, float64(compressedSize), "Compressing zeroes should save at least 25%% space - uncompressedSize: %d, compressedSize: %d", uncompressedSize, compressedSize) + for _, compressionType := range []CompressionType{CompressionSnappy, CompressionZstd} { + dirCompressed := bootstrap(compressionType) + tmpDirs = append(tmpDirs, dirCompressed) + + uncompressedSize, err := fileutil.DirSize(dirUnCompressed) + require.NoError(t, err) + compressedSize, err := fileutil.DirSize(dirCompressed) + require.NoError(t, err) + + require.Greater(t, float64(uncompressedSize)*0.75, float64(compressedSize), "Compressing zeroes should save at least 25%% space - uncompressedSize: %d, compressedSize: %d", uncompressedSize, compressedSize) + } } func TestLogPartialWrite(t *testing.T) { @@ -438,7 +444,7 @@ func TestLogPartialWrite(t *testing.T) { t.Run(testName, func(t *testing.T) { dirPath := t.TempDir() - w, err := NewSize(nil, nil, dirPath, segmentSize, false) + w, err := NewSize(nil, nil, dirPath, segmentSize, CompressionNone) require.NoError(t, err) // Replace the underlying segment file with a mocked one that injects a failure. 
@@ -505,8 +511,8 @@ func (f *faultySegmentFile) Write(p []byte) (int, error) { } func BenchmarkWAL_LogBatched(b *testing.B) { - for _, compress := range []bool{true, false} { - b.Run(fmt.Sprintf("compress=%t", compress), func(b *testing.B) { + for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { + b.Run(fmt.Sprintf("compress=%s", compress), func(b *testing.B) { dir := b.TempDir() w, err := New(nil, nil, dir, compress) @@ -535,8 +541,8 @@ func BenchmarkWAL_LogBatched(b *testing.B) { } func BenchmarkWAL_Log(b *testing.B) { - for _, compress := range []bool{true, false} { - b.Run(fmt.Sprintf("compress=%t", compress), func(b *testing.B) { + for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { + b.Run(fmt.Sprintf("compress=%s", compress), func(b *testing.B) { dir := b.TempDir() w, err := New(nil, nil, dir, compress) From a462f7fa2187edbb6ba2f286efe9fb36c122ed86 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Mon, 27 Mar 2023 14:54:29 -0700 Subject: [PATCH 244/632] Add function for iterating through all buckets in reverse to find max bucket Signed-off-by: Carrie Edwards --- model/histogram/float_histogram.go | 70 ++++++++ model/histogram/float_histogram_test.go | 226 ++++++++++++++++++++++++ 2 files changed, 296 insertions(+) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index f4ee13fac..978045696 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -622,6 +622,20 @@ func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { } } +// AllReverseBucketIterator returns a BucketIterator to iterate over all negative, +// zero, and positive buckets in descending order (starting at the lowest bucket +// and going up). If the highest negative bucket or the lowest positive bucket +// overlap with the zero bucket, their upper or lower boundary, respectively, is +// set to the zero threshold. 
+func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] { + return &allReverseFloatBucketIterator{ + h: h, + negIter: h.NegativeBucketIterator(), + posIter: h.PositiveReverseBucketIterator(), + state: 1, + } +} + // zeroCountForLargerThreshold returns what the histogram's zero count would be // if the ZeroThreshold had the provided larger (or equal) value. If the // provided value is less than the histogram's ZeroThreshold, the method panics. @@ -957,3 +971,59 @@ func (i *allFloatBucketIterator) Next() bool { func (i *allFloatBucketIterator) At() Bucket[float64] { return i.currBucket } + +type allReverseFloatBucketIterator struct { + h *FloatHistogram + negIter, posIter BucketIterator[float64] + // 1 means we are iterating positive buckets. + // 0 means it is time for the zero bucket. + // -1 means we are iterating negative buckets. + // Anything else means iteration is over. + state int8 + currBucket Bucket[float64] +} + +func (i *allReverseFloatBucketIterator) Next() bool { + switch i.state { + case 1: + if i.posIter.Next() { + i.currBucket = i.posIter.At() + if i.currBucket.Lower < i.h.ZeroThreshold { + i.currBucket.Lower = i.h.ZeroThreshold + } + return true + } + i.state = 0 + return i.Next() + case 0: + i.state = -1 + if i.h.ZeroCount > 0 { + i.currBucket = Bucket[float64]{ + Lower: -i.h.ZeroThreshold, + Upper: i.h.ZeroThreshold, + LowerInclusive: true, + UpperInclusive: true, + Count: i.h.ZeroCount, + // Index is irrelevant for the zero bucket. 
+ } + return true + } + return i.Next() + case -1: + if i.negIter.Next() { + i.currBucket = i.negIter.At() + if i.currBucket.Upper > -i.h.ZeroThreshold { + i.currBucket.Upper = -i.h.ZeroThreshold + } + return true + } + i.state = 42 + return false + } + + return false +} + +func (i *allReverseFloatBucketIterator) At() Bucket[float64] { + return i.currBucket +} diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index 242ef4c92..ce749b710 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -1979,3 +1979,229 @@ func TestAllFloatBucketIterator(t *testing.T) { }) } } + +func TestAllReverseFloatBucketIterator(t *testing.T) { + cases := []struct { + h FloatHistogram + // To determine the expected buckets. + includeNeg, includeZero, includePos bool + }{ + { + h: FloatHistogram{ + Count: 405, + ZeroCount: 102, + ZeroThreshold: 0.001, + Sum: 1008.4, + Schema: 1, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 3}, + {Offset: 3, Length: 0}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33}, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 0}, + {Offset: 3, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33}, + }, + includeNeg: true, + includeZero: true, + includePos: true, + }, + { + h: FloatHistogram{ + Count: 405, + ZeroCount: 102, + ZeroThreshold: 0.001, + Sum: 1008.4, + Schema: 1, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 0}, + {Offset: 3, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33}, + }, + includeNeg: true, + includeZero: true, + includePos: false, + }, + { + h: 
FloatHistogram{ + Count: 405, + ZeroCount: 102, + ZeroThreshold: 0.001, + Sum: 1008.4, + Schema: 1, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 3}, + {Offset: 3, Length: 0}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33}, + }, + includeNeg: false, + includeZero: true, + includePos: true, + }, + { + h: FloatHistogram{ + Count: 405, + ZeroCount: 102, + ZeroThreshold: 0.001, + Sum: 1008.4, + Schema: 1, + }, + includeNeg: false, + includeZero: true, + includePos: false, + }, + { + h: FloatHistogram{ + Count: 405, + ZeroCount: 0, + ZeroThreshold: 0.001, + Sum: 1008.4, + Schema: 1, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 3}, + {Offset: 3, Length: 0}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33}, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 0}, + {Offset: 3, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33}, + }, + includeNeg: true, + includeZero: false, + includePos: true, + }, + { + h: FloatHistogram{ + Count: 447, + ZeroCount: 42, + ZeroThreshold: 0.5, // Coinciding with bucket boundary. 
+ Sum: 1008.4, + Schema: 0, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 3}, + {Offset: 3, Length: 0}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33}, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 0}, + {Offset: 3, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33}, + }, + includeNeg: true, + includeZero: true, + includePos: true, + }, + { + h: FloatHistogram{ + Count: 447, + ZeroCount: 42, + ZeroThreshold: 0.6, // Within the bucket closest to zero. + Sum: 1008.4, + Schema: 0, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 3}, + {Offset: 3, Length: 0}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33}, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 0}, + {Offset: 3, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33}, + }, + includeNeg: true, + includeZero: true, + includePos: true, + }, + } + + for i, c := range cases { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + var expBuckets, actBuckets []Bucket[float64] + + if c.includePos { + it := c.h.PositiveReverseBucketIterator() + for it.Next() { + b := it.At() + if c.includeZero && b.Lower < c.h.ZeroThreshold { + b.Lower = c.h.ZeroThreshold + } + expBuckets = append(expBuckets, b) + } + } + if c.includeZero { + expBuckets = append(expBuckets, Bucket[float64]{ + Lower: -c.h.ZeroThreshold, + Upper: c.h.ZeroThreshold, + LowerInclusive: true, + UpperInclusive: true, + Count: c.h.ZeroCount, + }) + } + if c.includeNeg { + it := c.h.NegativeBucketIterator() + for it.Next() { + b 
:= it.At() + if c.includeZero && b.Upper > -c.h.ZeroThreshold { + b.Upper = -c.h.ZeroThreshold + } + expBuckets = append(expBuckets, b) + } + } + + it := c.h.AllReverseBucketIterator() + for it.Next() { + actBuckets = append(actBuckets, it.At()) + } + + require.Equal(t, expBuckets, actBuckets) + }) + } +} From bc0ee4a469978d0e12486aaab6ad3720c40384a2 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Mon, 27 Mar 2023 14:55:06 -0700 Subject: [PATCH 245/632] Implement native histogram min and max query functions Signed-off-by: Carrie Edwards --- promql/functions.go | 62 ++++++++++++++++++++++++++++++++++++++ promql/parser/functions.go | 10 ++++++ 2 files changed, 72 insertions(+) diff --git a/promql/functions.go b/promql/functions.go index 96bffab96..179d8b23e 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -996,6 +996,66 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod return enh.Out } +// === histogram_min(Vector parser.ValueTypeVector) Vector === +func funcHistogramMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + inVec := vals[0].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. + if sample.H == nil { + continue + } + + min := math.NaN() // initialize to NaN in case histogram is empty + + it := sample.H.AllBucketIterator() // AllBucketIterator starts at the lowest bucket in the native histogram + for it.Next() { + bucket := it.At() + // Find the lower limit of the lowest populated bucket + if bucket.Count > 0 { + min = bucket.Lower + break + } + } + + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + Point: Point{V: min}, + }) + } + return enh.Out +} + +// === histogram_max(Vector parser.ValueTypeVector) Vector === +func funcHistogramMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + inVec := vals[0].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. 
+ if sample.H == nil { + continue + } + + max := math.NaN() // initialize to NaN in case histogram is empty + + it := sample.H.AllReverseBucketIterator() // AllReverseBucketIterator starts at the highest bucket in the native histogram + for it.Next() { + bucket := it.At() + // Find the upper limit of the highest populated bucket + if bucket.Count > 0 { + max = bucket.Upper + break + } + } + + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + Point: Point{V: max}, + }) + } + return enh.Out +} + // === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { lower := vals[0].(Vector)[0].F @@ -1375,6 +1435,8 @@ var FunctionCalls = map[string]FunctionCall{ "floor": funcFloor, "histogram_count": funcHistogramCount, "histogram_fraction": funcHistogramFraction, + "histogram_max": funcHistogramMax, + "histogram_min": funcHistogramMin, "histogram_quantile": funcHistogramQuantile, "histogram_sum": funcHistogramSum, "holt_winters": funcHoltWinters, diff --git a/promql/parser/functions.go b/promql/parser/functions.go index 479c7f635..7c337412f 100644 --- a/promql/parser/functions.go +++ b/promql/parser/functions.go @@ -173,6 +173,16 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeVector}, ReturnType: ValueTypeVector, }, + "histogram_min": { + Name: "histogram_min", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "histogram_max": { + Name: "histogram_max", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, "histogram_fraction": { Name: "histogram_fraction", ArgTypes: []ValueType{ValueTypeScalar, ValueTypeScalar, ValueTypeVector}, From 2f9bc98b8afb389c5582fcf750bc1ebe09b5c5f4 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Mon, 27 Mar 2023 14:55:33 -0700 Subject: [PATCH 246/632] Add tests for min and max functions 
Signed-off-by: Carrie Edwards --- promql/engine_test.go | 206 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 206 insertions(+) diff --git a/promql/engine_test.go b/promql/engine_test.go index 5ffebc202..b5cc28c21 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -3295,6 +3295,212 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { } } +func TestNativeHistogram_HistogramMinAndMax(t *testing.T) { + // TODO(carrieedwards): Integrate histograms into the PromQL testing framework + // and write more tests there. + cases := []struct { + text string + // Histogram to test. + h *histogram.Histogram + // Expected + expectedMin float64 + expectedMax float64 + }{ + { + text: "all negative buckets", + h: &histogram.Histogram{ + Count: 12, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. + Schema: 0, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{2, 1, -2, 3}, + }, + expectedMin: -16, + expectedMax: -0.5, + }, + { + text: "all positive buckets", + h: &histogram.Histogram{ + Count: 12, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, 1, -2, 3}, + }, + expectedMin: 0.5, + expectedMax: 16, + }, + { + text: "all negative buckets", + h: &histogram.Histogram{ + Count: 12, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. + Schema: 0, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{2, 1, -2, 3}, + }, + expectedMin: -16, + expectedMax: -0.5, + }, + { + text: "both positive and negative buckets", + h: &histogram.Histogram{ + Count: 24, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. 
+ Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, 1, -2, 3}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{2, 1, -2, 3}, + }, + expectedMin: -16, + expectedMax: 16, + }, + { + text: "all positive buckets with zero bucket count", + h: &histogram.Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, 1, -2, 3}, + }, + expectedMin: -0.001, + expectedMax: 16, + }, + { + text: "all negative buckets with zero bucket count", + h: &histogram.Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. + Schema: 0, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{2, 1, -2, 3}, + }, + expectedMin: -16, + expectedMax: 0.001, + }, + { + text: "both positive and negative buckets with zero bucket count", + h: &histogram.Histogram{ + Count: 24, + ZeroCount: 4, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. 
+ Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, 1, -2, 3}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{2, 1, -2, 3}, + }, + expectedMin: -16, + expectedMax: 16, + }, + { + text: "empty histogram", + h: &histogram.Histogram{}, + expectedMin: math.NaN(), + expectedMax: math.NaN(), + }, + } + + test, err := NewTest(t, "") + require.NoError(t, err) + t.Cleanup(test.Close) + idx := int64(0) + for _, floatHisto := range []bool{true, false} { + for _, c := range cases { + t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) { + seriesName := "sparse_histogram_series" + lbls := labels.FromStrings("__name__", seriesName) + engine := test.QueryEngine() + + ts := idx * int64(10*time.Minute/time.Millisecond) + app := test.Storage().Appender(context.TODO()) + if floatHisto { + _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat()) + } else { + _, err = app.AppendHistogram(0, lbls, ts, c.h, nil) + } + require.NoError(t, err) + require.NoError(t, app.Commit()) + + queryString := fmt.Sprintf("histogram_min(%s)", seriesName) + qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res := qry.Exec(test.Context()) + require.NoError(t, res.Err) + + vector, err := res.Vector() + require.NoError(t, err) + + require.Len(t, vector, 1) + require.Nil(t, vector[0].H) + if math.IsNaN(c.expectedMin) { + require.True(t, math.IsNaN(vector[0].V)) + } else { + require.Equal(t, float64(c.expectedMin), vector[0].V) + } + + queryString = fmt.Sprintf("histogram_max(%s)", seriesName) + qry, err = engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res = qry.Exec(test.Context()) + require.NoError(t, res.Err) + + vector, err = res.Vector() + require.NoError(t, err) + + require.Len(t, 
vector, 1) + require.Nil(t, vector[0].H) + if math.IsNaN(c.expectedMax) { + require.True(t, math.IsNaN(vector[0].V)) + } else { + require.Equal(t, c.expectedMax, vector[0].V) + } + idx++ + }) + } + } +} + func TestNativeHistogram_HistogramQuantile(t *testing.T) { // TODO(codesome): Integrate histograms into the PromQL testing framework // and write more tests there. From f93ac97867d1972a38ac1647794d024506bb6be5 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Mon, 27 Mar 2023 14:55:51 -0700 Subject: [PATCH 247/632] Update querying function docs Signed-off-by: Carrie Edwards --- docs/querying/functions.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index e1a0b4a76..57b7953b9 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -200,6 +200,28 @@ observed values (in this case corresponding to “average request duration”): / histogram_count(rate(http_request_duration_seconds[10m])) +## `histogram_min()` + +_This function only acts on native histograms, which are an experimental +feature. The behavior of this function may change in future versions of +Prometheus, including its removal from PromQL._ + +`histogram_min(v instant-vector)` returns the estimated minimum value stored in +a native histogram. This estimation is based on the lower boundary of the lowest +bucket that contains values in the native histogram. Samples that are not native +histograms are ignored and do not show up in the returned vector. + +## `histogram_max()` + +_This function only acts on native histograms, which are an experimental +feature. The behavior of this function may change in future versions of +Prometheus, including its removal from PromQL._ + +`histogram_max(v instant-vector)` returns the estimated maximum value stored in +a native histogram. This estimation is based on the upper boundary of the highest +bucket that contains values in the native histogram. 
Samples that are not native +histograms are ignored and do not show up in the returned vector. + ## `histogram_fraction()` _This function only acts on native histograms, which are an experimental From 42d9169ba1f6e3fa808efd9cc733313723f498d7 Mon Sep 17 00:00:00 2001 From: Ziqi Zhao Date: Wed, 5 Jul 2023 19:05:53 +0800 Subject: [PATCH 248/632] enhance histogram_quantile to get min/max value Signed-off-by: Ziqi Zhao --- docs/querying/functions.md | 28 +--- model/histogram/float_histogram.go | 96 ++++---------- promql/engine_test.go | 206 ----------------------------- promql/functions.go | 62 --------- promql/parser/functions.go | 10 -- promql/quantile.go | 26 +++- 6 files changed, 52 insertions(+), 376 deletions(-) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index 57b7953b9..55ed92ecc 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -200,28 +200,6 @@ observed values (in this case corresponding to “average request duration”): / histogram_count(rate(http_request_duration_seconds[10m])) -## `histogram_min()` - -_This function only acts on native histograms, which are an experimental -feature. The behavior of this function may change in future versions of -Prometheus, including its removal from PromQL._ - -`histogram_min(v instant-vector)` returns the estimated minimum value stored in -a native histogram. This estimation is based on the lower boundary of the lowest -bucket that contains values in the native histogram. Samples that are not native -histograms are ignored and do not show up in the returned vector. - -## `histogram_max()` - -_This function only acts on native histograms, which are an experimental -feature. The behavior of this function may change in future versions of -Prometheus, including its removal from PromQL._ - -`histogram_max(v instant-vector)` returns the estimated maximum value stored in -a native histogram. 
This estimation is based on the upper boundary of the highest -bucket that contains values in the native histogram. Samples that are not native -histograms are ignored and do not show up in the returned vector. - ## `histogram_fraction()` _This function only acts on native histograms, which are an experimental @@ -339,6 +317,12 @@ bound of that bucket is greater than bucket. Otherwise, the upper bound of the lowest bucket is returned for quantiles located in the lowest bucket. +You can use `histogram_quantile(0, v instant-vector)` to get the estimated minimum value stored in +a histogram. + +You can use `histogram_quantile(1, v instant-vector)` to get the estimated maximum value stored in +a histogram. + ## `holt_winters()` diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 978045696..2ced09016 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -615,10 +615,10 @@ func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] // set to the zero threshold. func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { return &allFloatBucketIterator{ - h: h, - negIter: h.NegativeReverseBucketIterator(), - posIter: h.PositiveBucketIterator(), - state: -1, + h: h, + leftIter: h.NegativeReverseBucketIterator(), + rightIter: h.PositiveBucketIterator(), + state: -1, } } @@ -628,11 +628,11 @@ func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { // overlap with the zero bucket, their upper or lower boundary, respectively, is // set to the zero threshold. 
func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] { - return &allReverseFloatBucketIterator{ - h: h, - negIter: h.NegativeBucketIterator(), - posIter: h.PositiveReverseBucketIterator(), - state: 1, + return &allFloatBucketIterator{ + h: h, + leftIter: h.PositiveReverseBucketIterator(), + rightIter: h.NegativeBucketIterator(), + state: -1, } } @@ -917,8 +917,8 @@ func (i *reverseFloatBucketIterator) Next() bool { } type allFloatBucketIterator struct { - h *FloatHistogram - negIter, posIter BucketIterator[float64] + h *FloatHistogram + leftIter, rightIter BucketIterator[float64] // -1 means we are iterating negative buckets. // 0 means it is time for the zero bucket. // 1 means we are iterating positive buckets. @@ -930,10 +930,13 @@ type allFloatBucketIterator struct { func (i *allFloatBucketIterator) Next() bool { switch i.state { case -1: - if i.negIter.Next() { - i.currBucket = i.negIter.At() - if i.currBucket.Upper > -i.h.ZeroThreshold { + if i.leftIter.Next() { + i.currBucket = i.leftIter.At() + switch { + case i.currBucket.Upper < 0 && i.currBucket.Upper > -i.h.ZeroThreshold: i.currBucket.Upper = -i.h.ZeroThreshold + case i.currBucket.Lower > 0 && i.currBucket.Lower < i.h.ZeroThreshold: + i.currBucket.Lower = i.h.ZeroThreshold } return true } @@ -954,10 +957,13 @@ func (i *allFloatBucketIterator) Next() bool { } return i.Next() case 1: - if i.posIter.Next() { - i.currBucket = i.posIter.At() - if i.currBucket.Lower < i.h.ZeroThreshold { + if i.rightIter.Next() { + i.currBucket = i.rightIter.At() + switch { + case i.currBucket.Lower > 0 && i.currBucket.Lower < i.h.ZeroThreshold: i.currBucket.Lower = i.h.ZeroThreshold + case i.currBucket.Upper < 0 && i.currBucket.Upper > -i.h.ZeroThreshold: + i.currBucket.Upper = -i.h.ZeroThreshold } return true } @@ -971,59 +977,3 @@ func (i *allFloatBucketIterator) Next() bool { func (i *allFloatBucketIterator) At() Bucket[float64] { return i.currBucket } - -type allReverseFloatBucketIterator struct { 
- h *FloatHistogram - negIter, posIter BucketIterator[float64] - // 1 means we are iterating positive buckets. - // 0 means it is time for the zero bucket. - // -1 means we are iterating negative buckets. - // Anything else means iteration is over. - state int8 - currBucket Bucket[float64] -} - -func (i *allReverseFloatBucketIterator) Next() bool { - switch i.state { - case 1: - if i.posIter.Next() { - i.currBucket = i.posIter.At() - if i.currBucket.Lower < i.h.ZeroThreshold { - i.currBucket.Lower = i.h.ZeroThreshold - } - return true - } - i.state = 0 - return i.Next() - case 0: - i.state = -1 - if i.h.ZeroCount > 0 { - i.currBucket = Bucket[float64]{ - Lower: -i.h.ZeroThreshold, - Upper: i.h.ZeroThreshold, - LowerInclusive: true, - UpperInclusive: true, - Count: i.h.ZeroCount, - // Index is irrelevant for the zero bucket. - } - return true - } - return i.Next() - case -1: - if i.negIter.Next() { - i.currBucket = i.negIter.At() - if i.currBucket.Upper > -i.h.ZeroThreshold { - i.currBucket.Upper = -i.h.ZeroThreshold - } - return true - } - i.state = 42 - return false - } - - return false -} - -func (i *allReverseFloatBucketIterator) At() Bucket[float64] { - return i.currBucket -} diff --git a/promql/engine_test.go b/promql/engine_test.go index b5cc28c21..5ffebc202 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -3295,212 +3295,6 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { } } -func TestNativeHistogram_HistogramMinAndMax(t *testing.T) { - // TODO(carrieedwards): Integrate histograms into the PromQL testing framework - // and write more tests there. - cases := []struct { - text string - // Histogram to test. - h *histogram.Histogram - // Expected - expectedMin float64 - expectedMax float64 - }{ - { - text: "all negative buckets", - h: &histogram.Histogram{ - Count: 12, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. 
- Schema: 0, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{2, 1, -2, 3}, - }, - expectedMin: -16, - expectedMax: -0.5, - }, - { - text: "all positive buckets", - h: &histogram.Histogram{ - Count: 12, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. - Schema: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, 1, -2, 3}, - }, - expectedMin: 0.5, - expectedMax: 16, - }, - { - text: "all negative buckets", - h: &histogram.Histogram{ - Count: 12, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. - Schema: 0, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{2, 1, -2, 3}, - }, - expectedMin: -16, - expectedMax: -0.5, - }, - { - text: "both positive and negative buckets", - h: &histogram.Histogram{ - Count: 24, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. - Schema: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, 1, -2, 3}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{2, 1, -2, 3}, - }, - expectedMin: -16, - expectedMax: 16, - }, - { - text: "all positive buckets with zero bucket count", - h: &histogram.Histogram{ - Count: 12, - ZeroCount: 2, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. - Schema: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, 1, -2, 3}, - }, - expectedMin: -0.001, - expectedMax: 16, - }, - { - text: "all negative buckets with zero bucket count", - h: &histogram.Histogram{ - Count: 12, - ZeroCount: 2, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. 
- Schema: 0, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{2, 1, -2, 3}, - }, - expectedMin: -16, - expectedMax: 0.001, - }, - { - text: "both positive and negative buckets with zero bucket count", - h: &histogram.Histogram{ - Count: 24, - ZeroCount: 4, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. - Schema: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, 1, -2, 3}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{2, 1, -2, 3}, - }, - expectedMin: -16, - expectedMax: 16, - }, - { - text: "empty histogram", - h: &histogram.Histogram{}, - expectedMin: math.NaN(), - expectedMax: math.NaN(), - }, - } - - test, err := NewTest(t, "") - require.NoError(t, err) - t.Cleanup(test.Close) - idx := int64(0) - for _, floatHisto := range []bool{true, false} { - for _, c := range cases { - t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) { - seriesName := "sparse_histogram_series" - lbls := labels.FromStrings("__name__", seriesName) - engine := test.QueryEngine() - - ts := idx * int64(10*time.Minute/time.Millisecond) - app := test.Storage().Appender(context.TODO()) - if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat()) - } else { - _, err = app.AppendHistogram(0, lbls, ts, c.h, nil) - } - require.NoError(t, err) - require.NoError(t, app.Commit()) - - queryString := fmt.Sprintf("histogram_min(%s)", seriesName) - qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(test.Context()) - require.NoError(t, res.Err) - - vector, err := res.Vector() - require.NoError(t, err) - - require.Len(t, vector, 1) - require.Nil(t, vector[0].H) - if math.IsNaN(c.expectedMin) { - require.True(t, math.IsNaN(vector[0].V)) - } else { - 
require.Equal(t, float64(c.expectedMin), vector[0].V) - } - - queryString = fmt.Sprintf("histogram_max(%s)", seriesName) - qry, err = engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res = qry.Exec(test.Context()) - require.NoError(t, res.Err) - - vector, err = res.Vector() - require.NoError(t, err) - - require.Len(t, vector, 1) - require.Nil(t, vector[0].H) - if math.IsNaN(c.expectedMax) { - require.True(t, math.IsNaN(vector[0].V)) - } else { - require.Equal(t, c.expectedMax, vector[0].V) - } - idx++ - }) - } - } -} - func TestNativeHistogram_HistogramQuantile(t *testing.T) { // TODO(codesome): Integrate histograms into the PromQL testing framework // and write more tests there. diff --git a/promql/functions.go b/promql/functions.go index 179d8b23e..96bffab96 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -996,66 +996,6 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod return enh.Out } -// === histogram_min(Vector parser.ValueTypeVector) Vector === -func funcHistogramMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - inVec := vals[0].(Vector) - - for _, sample := range inVec { - // Skip non-histogram samples. 
- if sample.H == nil { - continue - } - - min := math.NaN() // initialize to NaN in case histogram is empty - - it := sample.H.AllBucketIterator() // AllBucketIterator starts at the lowest bucket in the native histogram - for it.Next() { - bucket := it.At() - // Find the lower limit of the lowest populated bucket - if bucket.Count > 0 { - min = bucket.Lower - break - } - } - - enh.Out = append(enh.Out, Sample{ - Metric: enh.DropMetricName(sample.Metric), - Point: Point{V: min}, - }) - } - return enh.Out -} - -// === histogram_max(Vector parser.ValueTypeVector) Vector === -func funcHistogramMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - inVec := vals[0].(Vector) - - for _, sample := range inVec { - // Skip non-histogram samples. - if sample.H == nil { - continue - } - - max := math.NaN() // initialize to NaN in case histogram is empty - - it := sample.H.AllReverseBucketIterator() // AllReverseBucketIterator starts at the highest bucket in the native histogram - for it.Next() { - bucket := it.At() - // Find the upper limit of the highest populated bucket - if bucket.Count > 0 { - max = bucket.Upper - break - } - } - - enh.Out = append(enh.Out, Sample{ - Metric: enh.DropMetricName(sample.Metric), - Point: Point{V: max}, - }) - } - return enh.Out -} - // === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { lower := vals[0].(Vector)[0].F @@ -1435,8 +1375,6 @@ var FunctionCalls = map[string]FunctionCall{ "floor": funcFloor, "histogram_count": funcHistogramCount, "histogram_fraction": funcHistogramFraction, - "histogram_max": funcHistogramMax, - "histogram_min": funcHistogramMin, "histogram_quantile": funcHistogramQuantile, "histogram_sum": funcHistogramSum, "holt_winters": funcHoltWinters, diff --git a/promql/parser/functions.go b/promql/parser/functions.go index 7c337412f..479c7f635 
100644 --- a/promql/parser/functions.go +++ b/promql/parser/functions.go @@ -173,16 +173,6 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeVector}, ReturnType: ValueTypeVector, }, - "histogram_min": { - Name: "histogram_min", - ArgTypes: []ValueType{ValueTypeVector}, - ReturnType: ValueTypeVector, - }, - "histogram_max": { - Name: "histogram_max", - ArgTypes: []ValueType{ValueTypeVector}, - ReturnType: ValueTypeVector, - }, "histogram_fraction": { Name: "histogram_fraction", ArgTypes: []ValueType{ValueTypeScalar, ValueTypeScalar, ValueTypeVector}, diff --git a/promql/quantile.go b/promql/quantile.go index d80345e81..793a6629f 100644 --- a/promql/quantile.go +++ b/promql/quantile.go @@ -158,9 +158,21 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { var ( bucket histogram.Bucket[float64] count float64 - it = h.AllBucketIterator() - rank = q * h.Count + it histogram.BucketIterator[float64] + rank float64 ) + + // if there are NaN observations in the histogram (h.Sum is NaN), use the forward iterator + // if the q < 0.5, use the forward iterator + // if the q >= 0.5, use the reverse iterator + if math.IsNaN(h.Sum) || q < 0.5 { + it = h.AllBucketIterator() + rank = q * h.Count + } else { + it = h.AllReverseBucketIterator() + rank = (1 - q) * h.Count + } + for it.Next() { bucket = it.At() count += bucket.Count @@ -193,7 +205,15 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { return bucket.Upper } - rank -= count - bucket.Count + // if there are NaN observations in the histogram (h.Sum is NaN), use the forward iterator + // if the q < 0.5, use the forward iterator + // if the q >= 0.5, use the reverse iterator + if math.IsNaN(h.Sum) || q < 0.5 { + rank -= count - bucket.Count + } else { + rank = count - rank + } + // TODO(codesome): Use a better estimation than linear. 
return bucket.Lower + (bucket.Upper-bucket.Lower)*(rank/bucket.Count) } From 60b380da70213975bfd343be64f5b89b03e2c944 Mon Sep 17 00:00:00 2001 From: Guillaume Berche Date: Wed, 12 Jul 2023 09:08:23 +0200 Subject: [PATCH 249/632] Refine functions.md as suggested during review See https://github.com/prometheus/prometheus/pull/11404#issuecomment-1631165746 Signed-off-by: Guillaume Berche --- docs/querying/functions.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index 3658c10c6..9b58cc319 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -289,7 +289,7 @@ label_join(up{job="api-server",src1="a",src2="b",src3="c"}, "foo", ",", "src1", ## `label_replace()` For each timeseries in `v`, `label_replace(v instant-vector, dst_label string, replacement string, src_label string, regex string)` -matches the regular expression `regex` against the value of the label `src_label`. If it +matches the [regular expression](https://github.com/google/re2/wiki/Syntax) `regex` against the value of the label `src_label`. If it matches, the value of the label `dst_label` in the returned timeseries will be the expansion of `replacement`, together with the original labels in the input. Capturing groups in the regular expression can be referenced with `$1`, `$2`, etc. Named capturing groups in the regular expression can be referenced with `$name` (where `name` is the capturing group name). If the regular expression doesn't match then the timeseries is returned unchanged. 
@@ -300,6 +300,10 @@ This example will return timeseries with the values `a:c` at label `service` and label_replace(up{job="api-server",service="a:c"}, "foo", "$1", "service", "(.*):.*") ``` +This second example has the same effect than the first example, and illustrates use of named capturing groups: +``` +label_replace(up{job="api-server",service="a:c"}, "foo", "$name", "service", "(?P.*):(?P.*)") +``` ## `ln()` `ln(v instant-vector)` calculates the natural logarithm for all elements in `v`. From 162612ea8627447be0b8fa170268b76524124bbc Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 12 Jul 2023 14:52:49 +0200 Subject: [PATCH 250/632] histograms: Improve comment Oversight during review of #12525. Signed-off-by: beorn7 --- promql/quantile.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/promql/quantile.go b/promql/quantile.go index 793a6629f..7f48b5945 100644 --- a/promql/quantile.go +++ b/promql/quantile.go @@ -205,9 +205,10 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { return bucket.Upper } - // if there are NaN observations in the histogram (h.Sum is NaN), use the forward iterator - // if the q < 0.5, use the forward iterator - // if the q >= 0.5, use the reverse iterator + // NaN observations increase h.Count but not the total number of + // observations in the buckets. Therefore, we have to use the forward + // iterator to find percentiles. We recognize histograms containing NaN + // observations by checking if their h.Sum is NaN. 
if math.IsNaN(h.Sum) || q < 0.5 { rank -= count - bucket.Count } else { From 1c3bd04beae91e7abae55903c814b67869cd7186 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 12 Jul 2023 18:17:35 +0200 Subject: [PATCH 251/632] histograms: Modify test to expose bug #12552 Signed-off-by: beorn7 --- model/textparse/protobufparse_test.go | 297 +++++++++++++++++++++++--- 1 file changed, 263 insertions(+), 34 deletions(-) diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go index 882cce59d..88ad9f221 100644 --- a/model/textparse/protobufparse_test.go +++ b/model/textparse/protobufparse_test.go @@ -408,6 +408,61 @@ metric: < > > +`, + `name: "test_histogram_family" +help: "Test histogram metric family with two very simple histograms." +type: HISTOGRAM +metric: < + label: < + name: "foo" + value: "bar" + > + histogram: < + sample_count: 5 + sample_sum: 12.1 + bucket: < + cumulative_count: 2 + upper_bound: 1.1 + > + bucket: < + cumulative_count: 3 + upper_bound: 2.2 + > + schema: 3 + positive_span: < + offset: 8 + length: 2 + > + positive_delta: 2 + positive_delta: 1 + > +> +metric: < + label: < + name: "foo" + value: "baz" + > + histogram: < + sample_count: 6 + sample_sum: 13.1 + bucket: < + cumulative_count: 1 + upper_bound: 1.1 + > + bucket: < + cumulative_count: 5 + upper_bound: 2.2 + > + schema: 3 + positive_span: < + offset: 8 + length: 2 + > + positive_delta: 1 + positive_delta: 4 + > +> + `, `name: "rpc_durations_seconds" help: "RPC latency distributions." 
@@ -751,6 +806,50 @@ func TestProtobufParse(t *testing.T) { "le", "+Inf", ), }, + { + m: "test_histogram_family", + help: "Test histogram metric family with two very simple histograms.", + }, + { + m: "test_histogram_family", + typ: MetricTypeHistogram, + }, + { + m: "test_histogram_family\xfffoo\xffbar", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 5, + Sum: 12.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{2, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "foo", "bar", + ), + }, + { + m: "test_histogram_family\xfffoo\xffbaz", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 6, + Sum: 13.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{1, 4}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "foo", "baz", + ), + }, { m: "rpc_durations_seconds", help: "RPC latency distributions.", @@ -1321,14 +1420,144 @@ func TestProtobufParse(t *testing.T) { ), }, { // 53 + m: "test_histogram_family", + help: "Test histogram metric family with two very simple histograms.", + }, + { // 54 + m: "test_histogram_family", + typ: MetricTypeHistogram, + }, + { // 55 + m: "test_histogram_family\xfffoo\xffbar", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 5, + Sum: 12.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{2, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "foo", "bar", + ), + }, + { // 56 + m: "test_histogram_family_count\xfffoo\xffbar", + v: 5, + lset: labels.FromStrings( + "__name__", "test_histogram_family_count", + "foo", "bar", + ), + }, + { // 57 + m: 
"test_histogram_family_sum\xfffoo\xffbar", + v: 12.1, + lset: labels.FromStrings( + "__name__", "test_histogram_family_sum", + "foo", "bar", + ), + }, + { // 58 + m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff1.1", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "bar", + "le", "1.1", + ), + }, + { // 59 + m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff2.2", + v: 3, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "bar", + "le", "2.2", + ), + }, + { // 60 + m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff+Inf", + v: 5, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "bar", + "le", "+Inf", + ), + }, + { // 61 + m: "test_histogram_family\xfffoo\xffbaz", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 6, + Sum: 13.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{1, 4}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "foo", "baz", + ), + }, + { // 62 + m: "test_histogram_family_count\xfffoo\xffbaz", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram_family_count", + "foo", "baz", + ), + }, + { // 63 + m: "test_histogram_family_sum\xfffoo\xffbaz", + v: 13.1, + lset: labels.FromStrings( + "__name__", "test_histogram_family_sum", + "foo", "baz", + ), + }, + { // 64 + m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff1.1", + v: 1, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "baz", + "le", "1.1", + ), + }, + { // 65 + m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff2.2", + v: 5, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "baz", + "le", "2.2", + ), + }, + { // 66 + m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff+Inf", + v: 6, + lset: labels.FromStrings( + 
"__name__", "test_histogram_family_bucket", + "foo", "baz", + "le", "+Inf", + ), + }, + { // 67 m: "rpc_durations_seconds", help: "RPC latency distributions.", }, - { // 54 + { // 68 m: "rpc_durations_seconds", typ: MetricTypeSummary, }, - { // 55 + { // 69 m: "rpc_durations_seconds_count\xffservice\xffexponential", v: 262, lset: labels.FromStrings( @@ -1336,7 +1565,7 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 56 + { // 70 m: "rpc_durations_seconds_sum\xffservice\xffexponential", v: 0.00025551262820703587, lset: labels.FromStrings( @@ -1344,7 +1573,7 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 57 + { // 71 m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.5", v: 6.442786329648548e-07, lset: labels.FromStrings( @@ -1353,7 +1582,7 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 58 + { // 72 m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.9", v: 1.9435742936658396e-06, lset: labels.FromStrings( @@ -1362,7 +1591,7 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 59 + { // 73 m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.99", v: 4.0471608667037015e-06, lset: labels.FromStrings( @@ -1371,22 +1600,22 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 60 + { // 74 m: "without_quantiles", help: "A summary without quantiles.", }, - { // 61 + { // 75 m: "without_quantiles", typ: MetricTypeSummary, }, - { // 62 + { // 76 m: "without_quantiles_count", v: 42, lset: labels.FromStrings( "__name__", "without_quantiles_count", ), }, - { // 63 + { // 77 m: "without_quantiles_sum", v: 1.234, lset: labels.FromStrings( @@ -1420,61 +1649,61 @@ func TestProtobufParse(t *testing.T) { var e exemplar.Exemplar p.Metric(&res) found := p.Exemplar(&e) - require.Equal(t, exp[i].m, string(m)) + require.Equal(t, exp[i].m, string(m), "i: %d", i) if ts != nil { - 
require.Equal(t, exp[i].t, *ts) + require.Equal(t, exp[i].t, *ts, "i: %d", i) } else { - require.Equal(t, exp[i].t, int64(0)) + require.Equal(t, exp[i].t, int64(0), "i: %d", i) } - require.Equal(t, exp[i].v, v) - require.Equal(t, exp[i].lset, res) + require.Equal(t, exp[i].v, v, "i: %d", i) + require.Equal(t, exp[i].lset, res, "i: %d", i) if len(exp[i].e) == 0 { - require.Equal(t, false, found) + require.Equal(t, false, found, "i: %d", i) } else { - require.Equal(t, true, found) - require.Equal(t, exp[i].e[0], e) + require.Equal(t, true, found, "i: %d", i) + require.Equal(t, exp[i].e[0], e, "i: %d", i) } case EntryHistogram: m, ts, shs, fhs := p.Histogram() p.Metric(&res) - require.Equal(t, exp[i].m, string(m)) + require.Equal(t, exp[i].m, string(m), "i: %d", i) if ts != nil { - require.Equal(t, exp[i].t, *ts) + require.Equal(t, exp[i].t, *ts, "i: %d", i) } else { - require.Equal(t, exp[i].t, int64(0)) + require.Equal(t, exp[i].t, int64(0), "i: %d", i) } - require.Equal(t, exp[i].lset, res) - require.Equal(t, exp[i].m, string(m)) + require.Equal(t, exp[i].lset, res, "i: %d", i) + require.Equal(t, exp[i].m, string(m), "i: %d", i) if shs != nil { - require.Equal(t, exp[i].shs, shs) + require.Equal(t, exp[i].shs, shs, "i: %d", i) } else { - require.Equal(t, exp[i].fhs, fhs) + require.Equal(t, exp[i].fhs, fhs, "i: %d", i) } j := 0 for e := (exemplar.Exemplar{}); p.Exemplar(&e); j++ { - require.Equal(t, exp[i].e[j], e) + require.Equal(t, exp[i].e[j], e, "i: %d", i) e = exemplar.Exemplar{} } - require.Equal(t, len(exp[i].e), j, "not enough exemplars found") + require.Equal(t, len(exp[i].e), j, "not enough exemplars found, i: %d", i) case EntryType: m, typ := p.Type() - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].typ, typ) + require.Equal(t, exp[i].m, string(m), "i: %d", i) + require.Equal(t, exp[i].typ, typ, "i: %d", i) case EntryHelp: m, h := p.Help() - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].help, string(h)) + 
require.Equal(t, exp[i].m, string(m), "i: %d", i) + require.Equal(t, exp[i].help, string(h), "i: %d", i) case EntryUnit: m, u := p.Unit() - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].unit, string(u)) + require.Equal(t, exp[i].m, string(m), "i: %d", i) + require.Equal(t, exp[i].unit, string(u), "i: %d", i) case EntryComment: - require.Equal(t, exp[i].comment, string(p.Comment())) + require.Equal(t, exp[i].comment, string(p.Comment()), "i: %d", i) } i++ From da047c6857850e315543e73272312a368ceaffbe Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 12 Jul 2023 18:42:02 +0200 Subject: [PATCH 252/632] histograms: Fix bug #12552 The problem was the following: When trying to parse native histograms and classic histograms in parallel, the parser would first parse the histogram proto messages as a native histogram and then parse the same message again, but now as a classic histogram. Afterwards, it would forget that it was dealing with a metric family that contains native histograms and would parse the rest of the metric family as classic histograms only. The fix is to check again after being done with a classic histogram. Signed-off-by: beorn7 --- model/textparse/protobufparse.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go index b831251ad..2ef52da5c 100644 --- a/model/textparse/protobufparse.go +++ b/model/textparse/protobufparse.go @@ -54,7 +54,7 @@ type ProtobufParser struct { // quantiles/buckets. fieldPos int fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed. - redoClassic bool // true after parsing a native histogram if we need to parse it again as a classit histogram. + redoClassic bool // true after parsing a native histogram if we need to parse it again as a classic histogram. // state is marked by the entry we are processing. EntryInvalid implies // that we have to decode the next MetricFamily. 
@@ -411,6 +411,14 @@ func (p *ProtobufParser) Next() (Entry, error) { p.metricPos++ p.fieldPos = -2 p.fieldsDone = false + // If this is a metric family containing native + // histograms, we have to switch back to native + // histograms after parsing a classic histogram. + if p.state == EntrySeries && + (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) && + isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) { + p.state = EntryHistogram + } } if p.metricPos >= len(p.mf.GetMetric()) { p.state = EntryInvalid From e1ace8d00e09e3a791b2814b635d917116e27914 Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Wed, 12 Jul 2023 21:34:55 +0200 Subject: [PATCH 253/632] Add PromQL format and label matcher set/delete commands to promtool Signed-off-by: Rob Skillington Signed-off-by: Julien Pivotto --- cmd/promtool/main.go | 103 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 103 insertions(+) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index f94be8b27..2b5ee9aee 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -58,6 +58,7 @@ import ( "github.com/prometheus/prometheus/notifier" _ "github.com/prometheus/prometheus/plugins" // Register plugins. "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/util/documentcli" ) @@ -245,6 +246,22 @@ func main() { "A list of one or more files containing recording rules to be backfilled. All recording rules listed in the files will be backfilled. 
Alerting rules are not evaluated.", ).Required().ExistingFiles() + promQLCmd := app.Command("promql", "PromQL formatting and editing.") + + promQLFormatCmd := promQLCmd.Command("format", "Format PromQL query to pretty printed form.") + promQLFormatQuery := promQLFormatCmd.Arg("query", "PromQL query.").Required().String() + + promQLLabelsCmd := promQLCmd.Command("label-matchers", "Edit label matchers contained within an existing PromQL query.") + promQLLabelsSetCmd := promQLLabelsCmd.Command("set", "Set a label matcher in the query.") + promQLLabelsSetType := promQLLabelsSetCmd.Flag("type", "Type of the label matcher to set.").Short('t').Default("=").Enum("=", "!=", "=~", "!~") + promQLLabelsSetQuery := promQLLabelsSetCmd.Arg("query", "PromQL query.").Required().String() + promQLLabelsSetName := promQLLabelsSetCmd.Arg("name", "Name of the label matcher to set.").Required().String() + promQLLabelsSetValue := promQLLabelsSetCmd.Arg("value", "Value of the label matcher to set.").Required().String() + + promQLLabelsDeleteCmd := promQLLabelsCmd.Command("delete", "Delete a label from the query.") + promQLLabelsDeleteQuery := promQLLabelsDeleteCmd.Arg("query", "PromQL query.").Required().String() + promQLLabelsDeleteName := promQLLabelsDeleteCmd.Arg("name", "Name of the label to delete.").Required().String() + featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings() documentationCmd := app.Command("write-documentation", "Generate command line documentation. 
Internal use.").Hidden() @@ -364,8 +381,18 @@ func main() { case importRulesCmd.FullCommand(): os.Exit(checkErr(importRules(serverURL, httpRoundTripper, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...))) + case documentationCmd.FullCommand(): os.Exit(checkErr(documentcli.GenerateMarkdown(app.Model(), os.Stdout))) + + case promQLFormatCmd.FullCommand(): + os.Exit(checkErr(formatPromQL(*promQLFormatQuery))) + + case promQLLabelsSetCmd.FullCommand(): + os.Exit(checkErr(labelsSetPromQL(*promQLLabelsSetQuery, *promQLLabelsSetType, *promQLLabelsSetName, *promQLLabelsSetValue))) + + case promQLLabelsDeleteCmd.FullCommand(): + os.Exit(checkErr(labelsDeletePromQL(*promQLLabelsDeleteQuery, *promQLLabelsDeleteName))) } } @@ -1375,3 +1402,79 @@ func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *c return nil } + +func formatPromQL(query string) error { + expr, err := parser.ParseExpr(query) + if err != nil { + return err + } + + fmt.Println(expr.Pretty(0)) + return nil +} + +func labelsSetPromQL(query, labelMatchType, name, value string) error { + expr, err := parser.ParseExpr(query) + if err != nil { + return err + } + + var matchType labels.MatchType + switch labelMatchType { + case parser.ItemType(parser.EQL).String(): + matchType = labels.MatchEqual + case parser.ItemType(parser.NEQ).String(): + matchType = labels.MatchNotEqual + case parser.ItemType(parser.EQL_REGEX).String(): + matchType = labels.MatchRegexp + case parser.ItemType(parser.NEQ_REGEX).String(): + matchType = labels.MatchNotRegexp + default: + return fmt.Errorf("invalid label match type: %s", labelMatchType) + } + + parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + if n, ok := node.(*parser.VectorSelector); ok { + var found bool + for i, l := range n.LabelMatchers { + if l.Name == name { + n.LabelMatchers[i].Type = matchType + n.LabelMatchers[i].Value = value + found = true + } + } 
+ if !found { + n.LabelMatchers = append(n.LabelMatchers, &labels.Matcher{ + Type: matchType, + Name: name, + Value: value, + }) + } + } + return nil + }) + + fmt.Println(expr.Pretty(0)) + return nil +} + +func labelsDeletePromQL(query, name string) error { + expr, err := parser.ParseExpr(query) + if err != nil { + return err + } + + parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + if n, ok := node.(*parser.VectorSelector); ok { + for i, l := range n.LabelMatchers { + if l.Name == name { + n.LabelMatchers = append(n.LabelMatchers[:i], n.LabelMatchers[i+1:]...) + } + } + } + return nil + }) + + fmt.Println(expr.Pretty(0)) + return nil +} From b3b669fd9a00d980cb853efa1b8a4912ddeba3bf Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Wed, 12 Jul 2023 22:30:20 +0200 Subject: [PATCH 254/632] Add experimental flag and docs Signed-off-by: Julien Pivotto --- cmd/promtool/main.go | 14 ++++++- docs/command-line/promtool.md | 71 +++++++++++++++++++++++++++++++++++ 2 files changed, 84 insertions(+), 1 deletion(-) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 2b5ee9aee..da4b8dc79 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -92,6 +92,8 @@ func main() { checkCmd := app.Command("check", "Check the resources for validity.") + experimental := app.Flag("experimental", "Enable experimental commands.").Bool() + sdCheckCmd := checkCmd.Command("service-discovery", "Perform service discovery for the given job name and report the results, including relabeling.") sdConfigFile := sdCheckCmd.Arg("config-file", "The prometheus config file.").Required().ExistingFile() sdJobName := sdCheckCmd.Arg("job", "The job to run service discovery for.").Required().String() @@ -246,7 +248,7 @@ func main() { "A list of one or more files containing recording rules to be backfilled. All recording rules listed in the files will be backfilled. 
Alerting rules are not evaluated.", ).Required().ExistingFiles() - promQLCmd := app.Command("promql", "PromQL formatting and editing.") + promQLCmd := app.Command("promql", "PromQL formatting and editing. Requires the --experimental flag.") promQLFormatCmd := promQLCmd.Command("format", "Format PromQL query to pretty printed form.") promQLFormatQuery := promQLFormatCmd.Arg("query", "PromQL query.").Required().String() @@ -386,16 +388,26 @@ func main() { os.Exit(checkErr(documentcli.GenerateMarkdown(app.Model(), os.Stdout))) case promQLFormatCmd.FullCommand(): + checkExperimental(*experimental) os.Exit(checkErr(formatPromQL(*promQLFormatQuery))) case promQLLabelsSetCmd.FullCommand(): + checkExperimental(*experimental) os.Exit(checkErr(labelsSetPromQL(*promQLLabelsSetQuery, *promQLLabelsSetType, *promQLLabelsSetName, *promQLLabelsSetValue))) case promQLLabelsDeleteCmd.FullCommand(): + checkExperimental(*experimental) os.Exit(checkErr(labelsDeletePromQL(*promQLLabelsDeleteQuery, *promQLLabelsDeleteName))) } } +func checkExperimental(f bool) { + if !f { + fmt.Fprintln(os.Stderr, "This command is experimental and requires the --experimental flag to be set.") + os.Exit(1) + } +} + // nolint:revive var lintError = fmt.Errorf("lint error") diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 673e8c048..587286e10 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -14,6 +14,7 @@ Tooling for the Prometheus monitoring system. | --- | --- | | -h, --help | Show context-sensitive help (also try --help-long and --help-man). | | --version | Show application version. | +| --experimental | Enable experimental commands. | | --enable-feature | Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details. | @@ -30,6 +31,7 @@ Tooling for the Prometheus monitoring system. 
| push | Push to a Prometheus server. | | test | Unit testing. | | tsdb | Run tsdb commands. | +| promql | PromQL formatting and editing. Requires the --experimental flag. | @@ -609,3 +611,72 @@ Create blocks of data for new recording rules. | rule-files | A list of one or more files containing recording rules to be backfilled. All recording rules listed in the files will be backfilled. Alerting rules are not evaluated. | Yes | + + +### `promtool promql` + +PromQL formatting and editing. Requires the --experimental flag. + + + +##### `promtool promql format` + +Format PromQL query to pretty printed form. + + + +###### Arguments + +| Argument | Description | Required | +| --- | --- | --- | +| query | PromQL query. | Yes | + + + + +##### `promtool promql label-matchers` + +Edit label matchers contained within an existing PromQL query. + + + +##### `promtool promql label-matchers set` + +Set a label matcher in the query. + + + +###### Flags + +| Flag | Description | Default | +| --- | --- | --- | +| -t, --type | Type of the label matcher to set. | `=` | + + + + +###### Arguments + +| Argument | Description | Required | +| --- | --- | --- | +| query | PromQL query. | Yes | +| name | Name of the label matcher to set. | Yes | +| value | Value of the label matcher to set. | Yes | + + + + +##### `promtool promql label-matchers delete` + +Delete a label from the query. + + + +###### Arguments + +| Argument | Description | Required | +| --- | --- | --- | +| query | PromQL query. | Yes | +| name | Name of the label to delete. 
| Yes | + + From 17cdfdd79f42ab52a3cff6882ab958dcfa411bc2 Mon Sep 17 00:00:00 2001 From: Mikhail Fesenko Date: Sat, 15 Oct 2022 01:18:20 +0200 Subject: [PATCH 255/632] maraphon.go: Simplified conditions in method Signed-off-by: Mikhail Fesenko --- discovery/marathon/marathon.go | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go index cfd3e2c08..ef897234d 100644 --- a/discovery/marathon/marathon.go +++ b/discovery/marathon/marathon.go @@ -106,14 +106,18 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { if len(c.AuthToken) > 0 && len(c.AuthTokenFile) > 0 { return errors.New("marathon_sd: at most one of auth_token & auth_token_file must be configured") } - if c.HTTPClientConfig.BasicAuth != nil && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) { - return errors.New("marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured") - } - if (len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0) && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) { - return errors.New("marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured") - } - if c.HTTPClientConfig.Authorization != nil && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) { - return errors.New("marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured") + + isAuthTokenProvided := len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0 + if isAuthTokenProvided { + if c.HTTPClientConfig.BasicAuth != nil { + return errors.New("marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured") + } + if len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0 { + return errors.New("marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured") + } + if 
c.HTTPClientConfig.Authorization != nil { + return errors.New("marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured") + } } return c.HTTPClientConfig.Validate() } From 02e11cc2a721be3dd6ae1bc4cb3bc37d65b6db96 Mon Sep 17 00:00:00 2001 From: Mikhail Fesenko Date: Thu, 13 Jul 2023 00:52:27 +0200 Subject: [PATCH 256/632] Fix from discussion Signed-off-by: Mikhail Fesenko --- discovery/marathon/marathon.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go index ef897234d..3baf79aff 100644 --- a/discovery/marathon/marathon.go +++ b/discovery/marathon/marathon.go @@ -107,15 +107,13 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return errors.New("marathon_sd: at most one of auth_token & auth_token_file must be configured") } - isAuthTokenProvided := len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0 - if isAuthTokenProvided { - if c.HTTPClientConfig.BasicAuth != nil { + if len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0 { + switch { + case c.HTTPClientConfig.BasicAuth != nil: return errors.New("marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured") - } - if len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0 { + case len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0: return errors.New("marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured") - } - if c.HTTPClientConfig.Authorization != nil { + case c.HTTPClientConfig.Authorization != nil: return errors.New("marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured") } } From 0e3f35324b46010b0f6363d01e592eebad1a80ad Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 13 Jul 2023 14:16:10 +0200 Subject: [PATCH 257/632] scrape: Enable ingestion of multiple exemplars per sample 
This has become a requirement for native histograms, as a single histogram sample commonly has many buckets, so that providing many exemplars makes sense. Since OM text doesn't support native histograms yet, the test had to be expanded to also support protobuf test cases. Signed-off-by: beorn7 --- model/textparse/interface.go | 4 +- model/textparse/openmetricsparse.go | 8 +- scrape/scrape.go | 2 +- scrape/scrape_test.go | 144 ++++++++++++++++++++++++++-- tsdb/exemplar.go | 4 +- 5 files changed, 147 insertions(+), 15 deletions(-) diff --git a/model/textparse/interface.go b/model/textparse/interface.go index efa581410..38903afc9 100644 --- a/model/textparse/interface.go +++ b/model/textparse/interface.go @@ -59,7 +59,9 @@ type Parser interface { Metric(l *labels.Labels) string // Exemplar writes the exemplar of the current sample into the passed - // exemplar. It returns if an exemplar exists or not. + // exemplar. It can be called repeatedly to retrieve multiple exemplars + // for the same sample. It returns false once all exemplars are + // retrieved (including the case where no exemplars exist at all). Exemplar(l *exemplar.Exemplar) bool // Next advances the parser to the next sample. It returns false if no diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go index c17d40020..e0833636f 100644 --- a/model/textparse/openmetricsparse.go +++ b/model/textparse/openmetricsparse.go @@ -174,8 +174,10 @@ func (p *OpenMetricsParser) Metric(l *labels.Labels) string { return s } -// Exemplar writes the exemplar of the current sample into the passed -// exemplar. It returns the whether an exemplar exists. +// Exemplar writes the exemplar of the current sample into the passed exemplar. +// It returns whether an exemplar exists. As OpenMetrics only ever has one +// exemplar per sample, every call after the first (for the same sample) will +// always return false. 
func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { if len(p.exemplar) == 0 { return false @@ -204,6 +206,8 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { p.builder.Sort() e.Labels = p.builder.Labels() + // Wipe exemplar so that future calls return false. + p.exemplar = p.exemplar[:0] return true } diff --git a/scrape/scrape.go b/scrape/scrape.go index ec65c5ad9..df729b448 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -1685,7 +1685,7 @@ loop: // number of samples remaining after relabeling. added++ - if hasExemplar := p.Exemplar(&e); hasExemplar { + for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) { if !e.HasTs { e.Ts = t } diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 6c45c26b4..72e22fd0d 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -17,6 +17,7 @@ import ( "bytes" "compress/gzip" "context" + "encoding/binary" "fmt" "io" "math" @@ -29,6 +30,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/gogo/protobuf/proto" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" @@ -39,6 +41,7 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/model/textparse" @@ -1980,15 +1983,18 @@ func TestScrapeLoopAppendExemplar(t *testing.T) { tests := []struct { title string scrapeText string + contentType string discoveryLabels []string - samples []sample + floats []sample + histograms []histogramSample exemplars []exemplar.Exemplar }{ { title: "Metric without exemplars", scrapeText: "metric_total{n=\"1\"} 0\n# EOF", + contentType: "application/openmetrics-text", discoveryLabels: []string{"n", "2"}, - samples: 
[]sample{{ + floats: []sample{{ metric: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"), v: 0, }}, @@ -1996,8 +2002,9 @@ func TestScrapeLoopAppendExemplar(t *testing.T) { { title: "Metric with exemplars", scrapeText: "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0\n# EOF", + contentType: "application/openmetrics-text", discoveryLabels: []string{"n", "2"}, - samples: []sample{{ + floats: []sample{{ metric: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"), v: 0, }}, @@ -2008,8 +2015,9 @@ func TestScrapeLoopAppendExemplar(t *testing.T) { { title: "Metric with exemplars and TS", scrapeText: "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0 10000\n# EOF", + contentType: "application/openmetrics-text", discoveryLabels: []string{"n", "2"}, - samples: []sample{{ + floats: []sample{{ metric: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"), v: 0, }}, @@ -2022,7 +2030,8 @@ func TestScrapeLoopAppendExemplar(t *testing.T) { scrapeText: `metric_total{n="1"} 1 # {t="1"} 1.0 10000 metric_total{n="2"} 2 # {t="2"} 2.0 20000 # EOF`, - samples: []sample{{ + contentType: "application/openmetrics-text", + floats: []sample{{ metric: labels.FromStrings("__name__", "metric_total", "n", "1"), v: 1, }, { @@ -2034,6 +2043,104 @@ metric_total{n="2"} 2 # {t="2"} 2.0 20000 {Labels: labels.FromStrings("t", "2"), Value: 2, Ts: 20000000, HasTs: true}, }, }, + { + title: "Native histogram with two exemplars", + scrapeText: `name: "test_histogram" +help: "Test histogram with many buckets removed to keep it manageable in size." 
+type: HISTOGRAM +metric: < + histogram: < + sample_count: 175 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count: 2 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.0003899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "59727" + > + value: -0.00039 + timestamp: < + seconds: 1625851155 + nanos: 146848499 + > + > + > + bucket: < + cumulative_count: 16 + upper_bound: -0.0002899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "5617" + > + value: -0.00029 + > + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count: 2 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_delta: 1 + negative_delta: 3 + negative_delta: -2 + negative_delta: -1 + negative_delta: 1 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_delta: 1 + positive_delta: 2 + positive_delta: -1 + positive_delta: -1 + > + timestamp_ms: 1234568 +> + +`, + contentType: "application/vnd.google.protobuf", + histograms: []histogramSample{{ + t: 1234568, + h: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + }}, + exemplars: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, Ts: 1234568, HasTs: false}, + }, + }, } for _, test := range tests { @@ -2069,8 +2176,8 @@ metric_total{n="2"} 2 # {t="2"} 2.0 20000 now := time.Now() - for i := range test.samples { - test.samples[i].t = 
timestamp.FromTime(now) + for i := range test.floats { + test.floats[i].t = timestamp.FromTime(now) } // We need to set the timestamp for expected exemplars that does not have a timestamp. @@ -2080,10 +2187,29 @@ metric_total{n="2"} 2 # {t="2"} 2.0 20000 } } - _, _, _, err := sl.append(app, []byte(test.scrapeText), "application/openmetrics-text", now) + buf := &bytes.Buffer{} + if test.contentType == "application/vnd.google.protobuf" { + // In case of protobuf, we have to create the binary representation. + pb := &dto.MetricFamily{} + // From text to proto message. + require.NoError(t, proto.UnmarshalText(test.scrapeText, pb)) + // From proto message to binary protobuf. + protoBuf, err := proto.Marshal(pb) + require.NoError(t, err) + + // Write first length, then binary protobuf. + varintBuf := binary.AppendUvarint(nil, uint64(len(protoBuf))) + buf.Write(varintBuf) + buf.Write(protoBuf) + } else { + buf.WriteString(test.scrapeText) + } + + _, _, _, err := sl.append(app, buf.Bytes(), test.contentType, now) require.NoError(t, err) require.NoError(t, app.Commit()) - require.Equal(t, test.samples, app.result) + require.Equal(t, test.floats, app.result) + require.Equal(t, test.histograms, app.resultHistograms) require.Equal(t, test.exemplars, app.resultExemplars) }) } diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index bf401da3c..d4c0505cc 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -365,8 +365,8 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp if prev := ce.exemplars[ce.nextIndex]; prev == nil { ce.exemplars[ce.nextIndex] = &circularBufferEntry{} } else { - // There exists exemplar already on this ce.nextIndex entry, drop it, to make place - // for others. + // There exists an exemplar already on this ce.nextIndex entry, + // drop it, to make place for others. 
var buf [1024]byte prevLabels := prev.ref.seriesLabels.Bytes(buf[:]) if prev.next == noExemplar { From 536a487af456abc66b90a4307388619d26209034 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 13 Jul 2023 14:27:51 +0200 Subject: [PATCH 258/632] scrape: Refactor names of float samples MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Continue to remove confusion that histogram samples are also samples and histogram values are also values etc. by renaming float values and float samples using the same schema as for histograms. Concretely: - result → resultFloats (corresponding to resultHistograms) - pendingResult → pendingFloats (corresponding to pendingHistograms) - rolledbackResult → rolledbackFloats (corresponding to rolledbackHistograms) - sample → floatSample (corresponding to histogramSample) This also order the fields in `collectResultAppender` more consistently. Signed-off-by: beorn7 --- scrape/helpers_test.go | 40 ++++++------ scrape/scrape_test.go | 134 ++++++++++++++++++++--------------------- 2 files changed, 87 insertions(+), 87 deletions(-) diff --git a/scrape/helpers_test.go b/scrape/helpers_test.go index 4862886e5..c580a5051 100644 --- a/scrape/helpers_test.go +++ b/scrape/helpers_test.go @@ -53,10 +53,10 @@ func (a nopAppender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.M func (a nopAppender) Commit() error { return nil } func (a nopAppender) Rollback() error { return nil } -type sample struct { +type floatSample struct { metric labels.Labels t int64 - v float64 + f float64 } type histogramSample struct { @@ -69,23 +69,23 @@ type histogramSample struct { // It can be used as its zero value or be backed by another appender it writes samples through. 
type collectResultAppender struct { next storage.Appender - result []sample - pendingResult []sample - rolledbackResult []sample - pendingExemplars []exemplar.Exemplar - resultExemplars []exemplar.Exemplar + resultFloats []floatSample + pendingFloats []floatSample + rolledbackFloats []floatSample resultHistograms []histogramSample pendingHistograms []histogramSample rolledbackHistograms []histogramSample - pendingMetadata []metadata.Metadata + resultExemplars []exemplar.Exemplar + pendingExemplars []exemplar.Exemplar resultMetadata []metadata.Metadata + pendingMetadata []metadata.Metadata } func (a *collectResultAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { - a.pendingResult = append(a.pendingResult, sample{ + a.pendingFloats = append(a.pendingFloats, floatSample{ metric: lset, t: t, - v: v, + f: v, }) if ref == 0 { @@ -133,11 +133,11 @@ func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.L } func (a *collectResultAppender) Commit() error { - a.result = append(a.result, a.pendingResult...) + a.resultFloats = append(a.resultFloats, a.pendingFloats...) a.resultExemplars = append(a.resultExemplars, a.pendingExemplars...) a.resultHistograms = append(a.resultHistograms, a.pendingHistograms...) a.resultMetadata = append(a.resultMetadata, a.pendingMetadata...) 
- a.pendingResult = nil + a.pendingFloats = nil a.pendingExemplars = nil a.pendingHistograms = nil a.pendingMetadata = nil @@ -148,9 +148,9 @@ func (a *collectResultAppender) Commit() error { } func (a *collectResultAppender) Rollback() error { - a.rolledbackResult = a.pendingResult + a.rolledbackFloats = a.pendingFloats a.rolledbackHistograms = a.pendingHistograms - a.pendingResult = nil + a.pendingFloats = nil a.pendingHistograms = nil if a.next == nil { return nil @@ -160,14 +160,14 @@ func (a *collectResultAppender) Rollback() error { func (a *collectResultAppender) String() string { var sb strings.Builder - for _, s := range a.result { - sb.WriteString(fmt.Sprintf("committed: %s %f %d\n", s.metric, s.v, s.t)) + for _, s := range a.resultFloats { + sb.WriteString(fmt.Sprintf("committed: %s %f %d\n", s.metric, s.f, s.t)) } - for _, s := range a.pendingResult { - sb.WriteString(fmt.Sprintf("pending: %s %f %d\n", s.metric, s.v, s.t)) + for _, s := range a.pendingFloats { + sb.WriteString(fmt.Sprintf("pending: %s %f %d\n", s.metric, s.f, s.t)) } - for _, s := range a.rolledbackResult { - sb.WriteString(fmt.Sprintf("rolledback: %s %f %d\n", s.metric, s.v, s.t)) + for _, s := range a.rolledbackFloats { + sb.WriteString(fmt.Sprintf("rolledback: %s %f %d\n", s.metric, s.f, s.t)) } return sb.String() } diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 72e22fd0d..3f119b94d 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -740,12 +740,12 @@ func TestScrapeLoopStop(t *testing.T) { // We expected 1 actual sample for each scrape plus 5 for report samples. // At least 2 scrapes were made, plus the final stale markers. 
- if len(appender.result) < 6*3 || len(appender.result)%6 != 0 { - t.Fatalf("Expected at least 3 scrapes with 6 samples each, got %d samples", len(appender.result)) + if len(appender.resultFloats) < 6*3 || len(appender.resultFloats)%6 != 0 { + t.Fatalf("Expected at least 3 scrapes with 6 samples each, got %d samples", len(appender.resultFloats)) } // All samples in a scrape must have the same timestamp. var ts int64 - for i, s := range appender.result { + for i, s := range appender.resultFloats { switch { case i%6 == 0: ts = s.t @@ -754,9 +754,9 @@ func TestScrapeLoopStop(t *testing.T) { } } // All samples from the last scrape must be stale markers. - for _, s := range appender.result[len(appender.result)-5:] { - if !value.IsStaleNaN(s.v) { - t.Fatalf("Appended last sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(s.v)) + for _, s := range appender.resultFloats[len(appender.resultFloats)-5:] { + if !value.IsStaleNaN(s.f) { + t.Fatalf("Appended last sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(s.f)) } } } @@ -1192,10 +1192,10 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) { // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for // each scrape successful or not. - require.Equal(t, 27, len(appender.result), "Appended samples not as expected:\n%s", appender) - require.Equal(t, 42.0, appender.result[0].v, "Appended first sample not as expected") - require.True(t, value.IsStaleNaN(appender.result[6].v), - "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.result[6].v)) + require.Equal(t, 27, len(appender.resultFloats), "Appended samples not as expected:\n%s", appender) + require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected") + require.True(t, value.IsStaleNaN(appender.resultFloats[6].f), + "Appended second sample not as expected. 
Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f)) } func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) { @@ -1257,10 +1257,10 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) { // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for // each scrape successful or not. - require.Equal(t, 17, len(appender.result), "Appended samples not as expected:\n%s", appender) - require.Equal(t, 42.0, appender.result[0].v, "Appended first sample not as expected") - require.True(t, value.IsStaleNaN(appender.result[6].v), - "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.result[6].v)) + require.Equal(t, 17, len(appender.resultFloats), "Appended samples not as expected:\n%s", appender) + require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected") + require.True(t, value.IsStaleNaN(appender.resultFloats[6].f), + "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f)) } func TestScrapeLoopCache(t *testing.T) { @@ -1342,7 +1342,7 @@ func TestScrapeLoopCache(t *testing.T) { // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for // each scrape successful or not. 
- require.Equal(t, 26, len(appender.result), "Appended samples not as expected:\n%s", appender) + require.Equal(t, 26, len(appender.resultFloats), "Appended samples not as expected:\n%s", appender) } func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) { @@ -1501,11 +1501,11 @@ func TestScrapeLoopAppend(t *testing.T) { require.NoError(t, err) require.NoError(t, slApp.Commit()) - expected := []sample{ + expected := []floatSample{ { metric: test.expLset, t: timestamp.FromTime(now), - v: test.expValue, + f: test.expValue, }, } @@ -1513,11 +1513,11 @@ func TestScrapeLoopAppend(t *testing.T) { // DeepEqual will report NaNs as being different, // so replace it with the expected one. if test.expValue == float64(value.NormalNaN) { - app.result[0].v = expected[0].v + app.resultFloats[0].f = expected[0].f } t.Logf("Test:%s", test.title) - require.Equal(t, expected, app.result) + require.Equal(t, expected, app.resultFloats) } } @@ -1587,13 +1587,13 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) { require.NoError(t, slApp.Commit()) - require.Equal(t, []sample{ + require.Equal(t, []floatSample{ { metric: labels.FromStrings(tc.expected...), t: timestamp.FromTime(time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC)), - v: 0, + f: 0, }, - }, app.result) + }, app.resultFloats) }) } } @@ -1641,15 +1641,15 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) { require.NoError(t, err) require.NoError(t, slApp.Commit()) - expected := []sample{ + expected := []floatSample{ { metric: lset, t: timestamp.FromTime(now), - v: expValue, + f: expValue, }, } - require.Equal(t, expected, app.result) + require.Equal(t, expected, app.resultFloats) } func TestScrapeLoopAppendSampleLimit(t *testing.T) { @@ -1709,14 +1709,14 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) { require.Equal(t, 1.0, change, "Unexpected change of sample limit metric: %f", change) // And verify that we got the samples that fit under the limit. 
- want := []sample{ + want := []floatSample{ { metric: labels.FromStrings(model.MetricNameLabel, "metric_a"), t: timestamp.FromTime(now), - v: 1, + f: 1, }, } - require.Equal(t, want, resApp.rolledbackResult, "Appended samples not as expected:\n%s", appender) + require.Equal(t, want, resApp.rolledbackFloats, "Appended samples not as expected:\n%s", appender) now = time.Now() slApp = sl.appender(context.Background()) @@ -1869,19 +1869,19 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) { require.NoError(t, slApp.Commit()) // DeepEqual will report NaNs as being different, so replace with a different value. - want := []sample{ + want := []floatSample{ { metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"), t: timestamp.FromTime(now), - v: 1, + f: 1, }, { metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"), t: timestamp.FromTime(now.Add(time.Minute)), - v: 2, + f: 2, }, } - require.Equal(t, want, capp.result, "Appended samples not as expected:\n%s", appender) + require.Equal(t, want, capp.resultFloats, "Appended samples not as expected:\n%s", appender) } func TestScrapeLoopAppendStaleness(t *testing.T) { @@ -1917,24 +1917,24 @@ func TestScrapeLoopAppendStaleness(t *testing.T) { require.NoError(t, err) require.NoError(t, slApp.Commit()) - ingestedNaN := math.Float64bits(app.result[1].v) + ingestedNaN := math.Float64bits(app.resultFloats[1].f) require.Equal(t, value.StaleNaN, ingestedNaN, "Appended stale sample wasn't as expected") // DeepEqual will report NaNs as being different, so replace with a different value. 
- app.result[1].v = 42 - want := []sample{ + app.resultFloats[1].f = 42 + want := []floatSample{ { metric: labels.FromStrings(model.MetricNameLabel, "metric_a"), t: timestamp.FromTime(now), - v: 1, + f: 1, }, { metric: labels.FromStrings(model.MetricNameLabel, "metric_a"), t: timestamp.FromTime(now.Add(time.Second)), - v: 42, + f: 42, }, } - require.Equal(t, want, app.result, "Appended samples not as expected:\n%s", appender) + require.Equal(t, want, app.resultFloats, "Appended samples not as expected:\n%s", appender) } func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) { @@ -1969,14 +1969,14 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) { require.NoError(t, err) require.NoError(t, slApp.Commit()) - want := []sample{ + want := []floatSample{ { metric: labels.FromStrings(model.MetricNameLabel, "metric_a"), t: 1000, - v: 1, + f: 1, }, } - require.Equal(t, want, app.result, "Appended samples not as expected:\n%s", appender) + require.Equal(t, want, app.resultFloats, "Appended samples not as expected:\n%s", appender) } func TestScrapeLoopAppendExemplar(t *testing.T) { @@ -1985,7 +1985,7 @@ func TestScrapeLoopAppendExemplar(t *testing.T) { scrapeText string contentType string discoveryLabels []string - floats []sample + floats []floatSample histograms []histogramSample exemplars []exemplar.Exemplar }{ @@ -1994,9 +1994,9 @@ func TestScrapeLoopAppendExemplar(t *testing.T) { scrapeText: "metric_total{n=\"1\"} 0\n# EOF", contentType: "application/openmetrics-text", discoveryLabels: []string{"n", "2"}, - floats: []sample{{ + floats: []floatSample{{ metric: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"), - v: 0, + f: 0, }}, }, { @@ -2004,9 +2004,9 @@ func TestScrapeLoopAppendExemplar(t *testing.T) { scrapeText: "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0\n# EOF", contentType: "application/openmetrics-text", discoveryLabels: []string{"n", "2"}, - floats: []sample{{ + floats: []floatSample{{ metric: 
labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"), - v: 0, + f: 0, }}, exemplars: []exemplar.Exemplar{ {Labels: labels.FromStrings("a", "abc"), Value: 1}, @@ -2017,9 +2017,9 @@ func TestScrapeLoopAppendExemplar(t *testing.T) { scrapeText: "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0 10000\n# EOF", contentType: "application/openmetrics-text", discoveryLabels: []string{"n", "2"}, - floats: []sample{{ + floats: []floatSample{{ metric: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"), - v: 0, + f: 0, }}, exemplars: []exemplar.Exemplar{ {Labels: labels.FromStrings("a", "abc"), Value: 1, Ts: 10000000, HasTs: true}, @@ -2031,12 +2031,12 @@ func TestScrapeLoopAppendExemplar(t *testing.T) { metric_total{n="2"} 2 # {t="2"} 2.0 20000 # EOF`, contentType: "application/openmetrics-text", - floats: []sample{{ + floats: []floatSample{{ metric: labels.FromStrings("__name__", "metric_total", "n", "1"), - v: 1, + f: 1, }, { metric: labels.FromStrings("__name__", "metric_total", "n", "2"), - v: 2, + f: 2, }}, exemplars: []exemplar.Exemplar{ {Labels: labels.FromStrings("t", "1"), Value: 1, Ts: 10000000, HasTs: true}, @@ -2208,7 +2208,7 @@ metric: < _, _, _, err := sl.append(app, buf.Bytes(), test.contentType, now) require.NoError(t, err) require.NoError(t, app.Commit()) - require.Equal(t, test.floats, app.result) + require.Equal(t, test.floats, app.resultFloats) require.Equal(t, test.histograms, app.resultHistograms) require.Equal(t, test.exemplars, app.resultExemplars) }) @@ -2219,12 +2219,12 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) { scrapeText := []string{`metric_total{n="1"} 1 # {t="1"} 1.0 10000 # EOF`, `metric_total{n="1"} 2 # {t="2"} 2.0 20000 # EOF`} - samples := []sample{{ + samples := []floatSample{{ metric: labels.FromStrings("__name__", "metric_total", "n", "1"), - v: 1, + f: 1, }, { metric: labels.FromStrings("__name__", "metric_total", "n", "1"), - v: 2, + f: 2, }} exemplars := []exemplar.Exemplar{ 
{Labels: labels.FromStrings("t", "1"), Value: 1, Ts: 10000000, HasTs: true}, @@ -2280,7 +2280,7 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) { require.NoError(t, app.Commit()) } - require.Equal(t, samples, app.result) + require.Equal(t, samples, app.resultFloats) require.Equal(t, exemplars, app.resultExemplars) } @@ -2318,7 +2318,7 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) { } sl.run(nil) - require.Equal(t, 0.0, appender.result[0].v, "bad 'up' value") + require.Equal(t, 0.0, appender.resultFloats[0].f, "bad 'up' value") } func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) { @@ -2356,7 +2356,7 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) { } sl.run(nil) - require.Equal(t, 0.0, appender.result[0].v, "bad 'up' value") + require.Equal(t, 0.0, appender.resultFloats[0].f, "bad 'up' value") } type errorAppender struct { @@ -2405,14 +2405,14 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T require.NoError(t, err) require.NoError(t, slApp.Commit()) - want := []sample{ + want := []floatSample{ { metric: labels.FromStrings(model.MetricNameLabel, "normal"), t: timestamp.FromTime(now), - v: 1, + f: 1, }, } - require.Equal(t, want, app.result, "Appended samples not as expected:\n%s", appender) + require.Equal(t, want, app.resultFloats, "Appended samples not as expected:\n%s", appender) require.Equal(t, 4, total) require.Equal(t, 4, added) require.Equal(t, 1, seriesAdded) @@ -2724,14 +2724,14 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) { require.NoError(t, err) require.NoError(t, slApp.Commit()) - want := []sample{ + want := []floatSample{ { metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"), t: 0, - v: 1, + f: 1, }, } - require.Equal(t, want, capp.result, "Appended samples not as expected:\n%s", appender) + require.Equal(t, want, capp.resultFloats, "Appended samples not as expected:\n%s", appender) } func 
TestScrapeLoop_DiscardTimestamps(t *testing.T) { @@ -2766,14 +2766,14 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) { require.NoError(t, err) require.NoError(t, slApp.Commit()) - want := []sample{ + want := []floatSample{ { metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"), t: timestamp.FromTime(now), - v: 1, + f: 1, }, } - require.Equal(t, want, capp.result, "Appended samples not as expected:\n%s", appender) + require.Equal(t, want, capp.resultFloats, "Appended samples not as expected:\n%s", appender) } func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) { From 096ceca44f79a6f6ba9e31f7e5695d84d1cca330 Mon Sep 17 00:00:00 2001 From: cui fliter Date: Thu, 13 Jul 2023 21:53:40 +0800 Subject: [PATCH 259/632] remove repetitive words (#12556) Signed-off-by: cui fliter --- model/histogram/float_histogram.go | 2 +- promql/value.go | 2 +- tsdb/ooo_head_read.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 2ced09016..782b07df6 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -421,7 +421,7 @@ func addBucket( // receiving histogram, but a pointer to it is returned for convenience. // // The ideal value for maxEmptyBuckets depends on circumstances. 
The motivation -// to set maxEmptyBuckets > 0 is the assumption that is is less overhead to +// to set maxEmptyBuckets > 0 is the assumption that is less overhead to // represent very few empty buckets explicitly within one span than cutting the // one span into two to treat the empty buckets as a gap between the two spans, // both in terms of storage requirement as well as in terms of encoding and diff --git a/promql/value.go b/promql/value.go index f59a25112..1b2a9d221 100644 --- a/promql/value.go +++ b/promql/value.go @@ -214,7 +214,7 @@ func (s Sample) MarshalJSON() ([]byte, error) { return json.Marshal(h) } -// Vector is basically only an an alias for []Sample, but the contract is that +// Vector is basically only an alias for []Sample, but the contract is that // in a Vector, all Samples have the same timestamp. type Vector []Sample diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index 8030fc367..2d683b545 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -137,7 +137,7 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra // chunks Meta the first chunk that overlaps with others. // Example chunks of a series: 5:(100, 200) 6:(500, 600) 7:(150, 250) 8:(550, 650) // In the example 5 overlaps with 7 and 6 overlaps with 8 so we only want to - // to return chunk Metas for chunk 5 and chunk 6e + // return chunk Metas for chunk 5 and chunk 6e *chks = append(*chks, tmpChks[0]) maxTime := tmpChks[0].MaxTime // Tracks the maxTime of the previous "to be merged chunk". 
for _, c := range tmpChks[1:] { From fd5b01afdcb1fefad227b9f7667125e02a1395f1 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Thu, 13 Jul 2023 22:26:49 +0200 Subject: [PATCH 260/632] promtool docs: write flags between backtits in help Signed-off-by: Julien Pivotto --- docs/command-line/promtool.md | 2 +- util/documentcli/documentcli.go | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 587286e10..546b200e2 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -615,7 +615,7 @@ Create blocks of data for new recording rules. ### `promtool promql` -PromQL formatting and editing. Requires the --experimental flag. +PromQL formatting and editing. Requires the `--experimental` flag. diff --git a/util/documentcli/documentcli.go b/util/documentcli/documentcli.go index c199d8d9b..720a7c9c7 100644 --- a/util/documentcli/documentcli.go +++ b/util/documentcli/documentcli.go @@ -26,6 +26,7 @@ import ( "strings" "github.com/alecthomas/kingpin/v2" + "github.com/grafana/regexp" ) // GenerateMarkdown generates the markdown documentation for an application from @@ -230,6 +231,7 @@ func writeSubcommands(writer io.Writer, level int, modelName string, commands [] if cmd.HelpLong != "" { help = cmd.HelpLong } + help = formatHyphenatedWords(help) if _, err := writer.Write([]byte(fmt.Sprintf("\n\n%s `%s %s`\n\n%s\n\n", strings.Repeat("#", level+1), modelName, cmd.FullCommand, help))); err != nil { return err } @@ -250,3 +252,11 @@ func writeSubcommands(writer io.Writer, level int, modelName string, commands [] } return nil } + +func formatHyphenatedWords(input string) string { + hyphenRegex := regexp.MustCompile(`\B--\w+\b`) + replacer := func(s string) string { + return fmt.Sprintf("`%s`", s) + } + return hyphenRegex.ReplaceAllStringFunc(input, replacer) +} From de89a8c827640923d68ab4677803803ee51a3894 Mon Sep 17 00:00:00 2001 From: Daniel Swarbrick Date: Sun, 16 
Jul 2023 19:23:44 +0200 Subject: [PATCH 261/632] Linode SD: cast InstanceSpec values to int64 to avoid overflows InstanceSpec struct members are untyped integers, so they can overflow on 32-bit arch when bit-shifted left. Signed-off-by: Daniel Swarbrick --- discovery/linode/linode.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go index 12b957514..63213c87b 100644 --- a/discovery/linode/linode.go +++ b/discovery/linode/linode.go @@ -304,10 +304,10 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro linodeLabelGroup: model.LabelValue(instance.Group), linodeLabelHypervisor: model.LabelValue(instance.Hypervisor), linodeLabelBackups: model.LabelValue(backupsStatus), - linodeLabelSpecsDiskBytes: model.LabelValue(fmt.Sprintf("%d", instance.Specs.Disk<<20)), - linodeLabelSpecsMemoryBytes: model.LabelValue(fmt.Sprintf("%d", instance.Specs.Memory<<20)), + linodeLabelSpecsDiskBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Disk)<<20)), + linodeLabelSpecsMemoryBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Memory)<<20)), linodeLabelSpecsVCPUs: model.LabelValue(fmt.Sprintf("%d", instance.Specs.VCPUs)), - linodeLabelSpecsTransferBytes: model.LabelValue(fmt.Sprintf("%d", instance.Specs.Transfer<<20)), + linodeLabelSpecsTransferBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Transfer)<<20)), } addr := net.JoinHostPort(publicIPv4, strconv.FormatUint(uint64(d.port), 10)) From 7aa79657163ca2e085d6cea9c1538267266d2772 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20T=C3=B6lle?= Date: Mon, 17 Jul 2023 14:21:18 +0200 Subject: [PATCH 262/632] build(deps): bump github.com/hetznercloud/hcloud-go to v2.0.0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Version 2 introduced a breaking change in the `id` field of all resources. 
They were changed from `int` to `int64` to make sure that all future numerical IDs are supported on all architectures. You can learn more about this [here](https://docs.hetzner.cloud/#deprecation-notices-%E2%9A%A0%EF%B8%8F) Signed-off-by: Julian Tölle --- discovery/hetzner/hcloud.go | 2 +- discovery/hetzner/hetzner.go | 2 +- go.mod | 12 ++++++------ go.sum | 26 +++++++++++++------------- 4 files changed, 21 insertions(+), 21 deletions(-) diff --git a/discovery/hetzner/hcloud.go b/discovery/hetzner/hcloud.go index 50afdc1ec..4bcfde830 100644 --- a/discovery/hetzner/hcloud.go +++ b/discovery/hetzner/hcloud.go @@ -22,7 +22,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/hetznercloud/hcloud-go/hcloud" + "github.com/hetznercloud/hcloud-go/v2/hcloud" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" diff --git a/discovery/hetzner/hetzner.go b/discovery/hetzner/hetzner.go index 084319d95..40b28cc2c 100644 --- a/discovery/hetzner/hetzner.go +++ b/discovery/hetzner/hetzner.go @@ -20,7 +20,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/hetznercloud/hcloud-go/hcloud" + "github.com/hetznercloud/hcloud-go/v2/hcloud" "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/go.mod b/go.mod index 565456ed9..527cc4154 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/consul/api v1.21.0 github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f - github.com/hetznercloud/hcloud-go v1.47.0 + github.com/hetznercloud/hcloud-go/v2 v2.0.0 github.com/ionos-cloud/sdk-go/v6 v6.1.7 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.12 @@ -64,10 +64,10 @@ require ( go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.2 go.uber.org/goleak v1.2.1 - golang.org/x/net v0.11.0 + golang.org/x/net v0.12.0 golang.org/x/oauth2 v0.9.0 golang.org/x/sync v0.2.0 - golang.org/x/sys 
v0.9.0 + golang.org/x/sys v0.10.0 golang.org/x/time v0.3.0 golang.org/x/tools v0.9.3 google.golang.org/api v0.114.0 @@ -178,11 +178,11 @@ require ( go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect go.opentelemetry.io/otel/metric v1.16.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect - golang.org/x/crypto v0.10.0 // indirect + golang.org/x/crypto v0.11.0 // indirect golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 golang.org/x/mod v0.10.0 // indirect - golang.org/x/term v0.9.0 // indirect - golang.org/x/text v0.10.0 // indirect + golang.org/x/term v0.10.0 // indirect + golang.org/x/text v0.11.0 // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 57f9f82c6..512eca125 100644 --- a/go.sum +++ b/go.sum @@ -464,8 +464,8 @@ github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f/go.mod h1:Xjd3 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go v1.47.0 h1:WMZDwLPtMZwOLWIgERHrrrTzRFdHx0hTygYVQ4VWHW4= -github.com/hetznercloud/hcloud-go v1.47.0/go.mod h1:zSpmBnxIdb5oMdbpVg1Q977Cq2qiwERkjj3jqRbHH5U= +github.com/hetznercloud/hcloud-go/v2 v2.0.0 h1:Sg1DJ+MAKvbYAqaBaq9tPbwXBS2ckPIaMtVdUjKu+4g= +github.com/hetznercloud/hcloud-go/v2 v2.0.0/go.mod h1:4iUG2NG8b61IAwNx6UsMWQ6IfIf/i1RsG0BbsKAyR5Q= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -700,7 +700,7 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod 
h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= @@ -844,8 +844,8 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= -golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -928,8 +928,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net 
v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU= -golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= +golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1021,14 +1021,14 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.9.0 
h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28= -golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= +golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1039,8 +1039,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= -golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 5a0dea1d91cdd6321b5e7b6f38de850821d6872e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20T=C3=B6lle?= Date: Mon, 17 Jul 2023 14:30:23 +0200 Subject: [PATCH 263/632] docs: use actual flag for signing off commits MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flag is documented as --signoff: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt---signoff Signed-off-by: 
Julian Tölle --- .github/PULL_REQUEST_TEMPLATE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 9e78957ec..95df72dd0 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,7 +1,7 @@ +```yaml + metric_relabel_configs: + - source_labels: + - quantile + target_label: quantile + regex: (\d+)\.0+ + - source_labels: + - le + - __name__ + target_label: le + regex: (\d+)\.0+;.*_bucket +``` ## OTLP Receiver diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 7be3f3461..08cf37ede 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -3679,6 +3679,131 @@ func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) { require.Equal(t, "750ms", sp.ActiveTargets()[0].labels.Get(model.ScrapeTimeoutLabel)) } +// Testing whether we can remove trailing .0 from histogram 'le' and summary 'quantile' labels. +func TestLeQuantileReLabel(t *testing.T) { + simpleStorage := teststorage.New(t) + defer simpleStorage.Close() + + config := &config.ScrapeConfig{ + JobName: "test", + MetricRelabelConfigs: []*relabel.Config{ + { + SourceLabels: model.LabelNames{"le", "__name__"}, + Regex: relabel.MustNewRegexp("(\\d+)\\.0+;.*_bucket"), + Replacement: relabel.DefaultRelabelConfig.Replacement, + Separator: relabel.DefaultRelabelConfig.Separator, + TargetLabel: "le", + Action: relabel.Replace, + }, + { + SourceLabels: model.LabelNames{"quantile"}, + Regex: relabel.MustNewRegexp("(\\d+)\\.0+"), + Replacement: relabel.DefaultRelabelConfig.Replacement, + Separator: relabel.DefaultRelabelConfig.Separator, + TargetLabel: "quantile", + Action: relabel.Replace, + }, + }, + SampleLimit: 100, + Scheme: "http", + ScrapeInterval: model.Duration(100 * time.Millisecond), + ScrapeTimeout: model.Duration(100 * time.Millisecond), + } + + metricsText := ` +# HELP test_histogram This is a histogram with default buckets +# TYPE test_histogram histogram 
+test_histogram_bucket{address="0.0.0.0",port="5001",le="0.005"} 0 +test_histogram_bucket{address="0.0.0.0",port="5001",le="0.01"} 0 +test_histogram_bucket{address="0.0.0.0",port="5001",le="0.025"} 0 +test_histogram_bucket{address="0.0.0.0",port="5001",le="0.05"} 0 +test_histogram_bucket{address="0.0.0.0",port="5001",le="0.1"} 0 +test_histogram_bucket{address="0.0.0.0",port="5001",le="0.25"} 0 +test_histogram_bucket{address="0.0.0.0",port="5001",le="0.5"} 0 +test_histogram_bucket{address="0.0.0.0",port="5001",le="1.0"} 0 +test_histogram_bucket{address="0.0.0.0",port="5001",le="2.5"} 0 +test_histogram_bucket{address="0.0.0.0",port="5001",le="5.0"} 0 +test_histogram_bucket{address="0.0.0.0",port="5001",le="10.0"} 0 +test_histogram_bucket{address="0.0.0.0",port="5001",le="+Inf"} 0 +test_histogram_sum{address="0.0.0.0",port="5001"} 0 +test_histogram_count{address="0.0.0.0",port="5001"} 0 +# HELP test_summary Number of inflight requests sampled at a regular interval. Quantile buckets keep track of inflight requests over the last 60s. +# TYPE test_summary summary +test_summary{quantile="0.5"} 0 +test_summary{quantile="0.9"} 0 +test_summary{quantile="0.95"} 0 +test_summary{quantile="0.99"} 0 +test_summary{quantile="1.0"} 1 +test_summary_sum 1 +test_summary_count 199 +` + + // The expected "le" values do not have the trailing ".0". + expectedLeValues := []string{"0.005", "0.01", "0.025", "0.05", "0.1", "0.25", "0.5", "1", "2.5", "5", "10", "+Inf"} + + // The expected "quantile" values do not have the trailing ".0". 
+ expectedQuantileValues := []string{"0.5", "0.9", "0.95", "0.99", "1"} + + scrapeCount := 0 + scraped := make(chan bool) + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprint(w, metricsText) + scrapeCount++ + if scrapeCount > 2 { + close(scraped) + } + })) + defer ts.Close() + + sp, err := newScrapePool(config, simpleStorage, 0, nil, &Options{}, newTestScrapeMetrics(t)) + require.NoError(t, err) + defer sp.stop() + + testURL, err := url.Parse(ts.URL) + require.NoError(t, err) + sp.Sync([]*targetgroup.Group{ + { + Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(testURL.Host)}}, + }, + }) + require.Equal(t, 1, len(sp.ActiveTargets())) + + select { + case <-time.After(5 * time.Second): + t.Fatalf("target was not scraped") + case <-scraped: + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + q, err := simpleStorage.Querier(time.Time{}.UnixNano(), time.Now().UnixNano()) + require.NoError(t, err) + defer q.Close() + + checkValues := func(labelName string, expectedValues []string, series storage.SeriesSet) { + foundLeValues := map[string]bool{} + + for series.Next() { + s := series.At() + v := s.Labels().Get(labelName) + require.NotContains(t, foundLeValues, v, "duplicate label value found") + foundLeValues[v] = true + } + + require.Equal(t, len(expectedValues), len(foundLeValues), "number of label values not as expected") + for _, v := range expectedValues { + require.Contains(t, foundLeValues, v, "label value not found") + } + } + + series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "test_histogram_bucket")) + checkValues("le", expectedLeValues, series) + + series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "test_summary")) + checkValues("quantile", expectedQuantileValues, series) +} + func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *testing.T) { appender := 
&collectResultAppender{} var ( From 7eaefcf379e1b27c3551c6b698a3b1d6bf24d500 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Wed, 1 Nov 2023 20:06:46 +0100 Subject: [PATCH 524/632] ci(lint): enable errorlint on scrape (#12923) Signed-off-by: Matthieu MOREL Signed-off-by: Jesus Vazquez Co-authored-by: Jesus Vazquez --- .golangci.yml | 3 --- scrape/scrape.go | 42 +++++++++++++++++++++--------------------- scrape/scrape_test.go | 16 ++++++++-------- 3 files changed, 29 insertions(+), 32 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index d585dfc74..871d748c7 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -34,9 +34,6 @@ issues: - path: _test.go linters: - errcheck - - path: scrape/ - linters: - - errorlint - path: tsdb/ linters: - errorlint diff --git a/scrape/scrape.go b/scrape/scrape.go index 0ea740b68..790ee18af 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -18,6 +18,7 @@ import ( "bytes" "compress/gzip" "context" + "errors" "fmt" "io" "math" @@ -30,7 +31,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" @@ -122,7 +122,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, options.HTTPClientOptions...) if err != nil { - return nil, errors.Wrap(err, "error creating HTTP client") + return nil, fmt.Errorf("error creating HTTP client: %w", err) } buffers := pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }) @@ -250,7 +250,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, sp.httpOpts...) 
if err != nil { sp.metrics.targetScrapePoolReloadsFailed.Inc() - return errors.Wrap(err, "error creating HTTP client") + return fmt.Errorf("error creating HTTP client: %w", err) } reuseCache := reusableCache(sp.config, cfg) @@ -695,7 +695,7 @@ func (s *targetScraper) readResponse(ctx context.Context, resp *http.Response, w }() if resp.StatusCode != http.StatusOK { - return "", errors.Errorf("server returned HTTP status %s", resp.Status) + return "", fmt.Errorf("server returned HTTP status %s", resp.Status) } if s.bodySizeLimit <= 0 { @@ -1549,7 +1549,7 @@ loop: } sampleAdded, err = sl.checkAddError(ce, met, parsedTimestamp, err, &sampleLimitErr, &bucketLimitErr, &appErrs) if err != nil { - if err != storage.ErrNotFound { + if !errors.Is(err, storage.ErrNotFound) { level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err) } break loop @@ -1620,8 +1620,8 @@ loop: sl.cache.forEachStale(func(lset labels.Labels) bool { // Series no longer exposed, mark it stale. _, err = app.Append(0, lset, defTime, math.Float64frombits(value.StaleNaN)) - switch errors.Cause(err) { - case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp: + switch { + case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp): // Do not count these in logging, as this is expected if a target // goes away and comes back again with a new scrape loop. err = nil @@ -1635,35 +1635,35 @@ loop: // Adds samples to the appender, checking the error, and then returns the # of samples added, // whether the caller should continue to process more samples, and any sample or bucket limit errors. 
func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) { - switch errors.Cause(err) { - case nil: + switch { + case err == nil: if (tp == nil || sl.trackTimestampsStaleness) && ce != nil { sl.cache.trackStaleness(ce.hash, ce.lset) } return true, nil - case storage.ErrNotFound: + case errors.Is(err, storage.ErrNotFound): return false, storage.ErrNotFound - case storage.ErrOutOfOrderSample: + case errors.Is(err, storage.ErrOutOfOrderSample): appErrs.numOutOfOrder++ level.Debug(sl.l).Log("msg", "Out of order sample", "series", string(met)) sl.metrics.targetScrapeSampleOutOfOrder.Inc() return false, nil - case storage.ErrDuplicateSampleForTimestamp: + case errors.Is(err, storage.ErrDuplicateSampleForTimestamp): appErrs.numDuplicates++ level.Debug(sl.l).Log("msg", "Duplicate sample for timestamp", "series", string(met)) sl.metrics.targetScrapeSampleDuplicate.Inc() return false, nil - case storage.ErrOutOfBounds: + case errors.Is(err, storage.ErrOutOfBounds): appErrs.numOutOfBounds++ level.Debug(sl.l).Log("msg", "Out of bounds metric", "series", string(met)) sl.metrics.targetScrapeSampleOutOfBounds.Inc() return false, nil - case errSampleLimit: + case errors.Is(err, errSampleLimit): // Keep on parsing output if we hit the limit, so we report the correct // total number of samples scraped. *sampleLimitErr = err return false, nil - case errBucketLimit: + case errors.Is(err, errBucketLimit): // Keep on parsing output if we hit the limit, so we report the correct // total number of samples scraped. 
*bucketLimitErr = err @@ -1674,10 +1674,10 @@ func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err e } func (sl *scrapeLoop) checkAddExemplarError(err error, e exemplar.Exemplar, appErrs *appendErrors) error { - switch errors.Cause(err) { - case storage.ErrNotFound: + switch { + case errors.Is(err, storage.ErrNotFound): return storage.ErrNotFound - case storage.ErrOutOfOrderExemplar: + case errors.Is(err, storage.ErrOutOfOrderExemplar): appErrs.numExemplarOutOfOrder++ level.Debug(sl.l).Log("msg", "Out of order exemplar", "exemplar", fmt.Sprintf("%+v", e)) sl.metrics.targetScrapeExemplarOutOfOrder.Inc() @@ -1789,13 +1789,13 @@ func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v } ref, err := app.Append(ref, lset, t, v) - switch errors.Cause(err) { - case nil: + switch { + case err == nil: if !ok { sl.cache.addRef(s, ref, lset, lset.Hash()) } return nil - case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp: + case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp): // Do not log here, as this is expected if a target goes away and comes back // again with a new scrape loop. 
return nil diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 08cf37ede..ccd651f49 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -18,6 +18,7 @@ import ( "compress/gzip" "context" "encoding/binary" + "errors" "fmt" "io" "math" @@ -31,7 +32,6 @@ import ( "github.com/go-kit/log" "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" config_util "github.com/prometheus/common/config" @@ -882,7 +882,7 @@ func TestScrapeLoopRun(t *testing.T) { select { case err := <-errc: - if err != context.DeadlineExceeded { + if !errors.Is(err, context.DeadlineExceeded) { t.Fatalf("Expected timeout error but got: %s", err) } case <-time.After(3 * time.Second): @@ -952,7 +952,7 @@ func TestScrapeLoopForcedErr(t *testing.T) { select { case err := <-errc: - if err != forcedErr { + if !errors.Is(err, forcedErr) { t.Fatalf("Expected forced error but got: %s", err) } case <-time.After(3 * time.Second): @@ -1741,7 +1741,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) { now := time.Now() slApp := sl.appender(context.Background()) total, added, seriesAdded, err := sl.append(app, []byte("metric_a 1\nmetric_b 1\nmetric_c 1\n"), "", now) - if err != errSampleLimit { + if !errors.Is(err, errSampleLimit) { t.Fatalf("Did not see expected sample limit error: %s", err) } require.NoError(t, slApp.Rollback()) @@ -1772,7 +1772,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) { now = time.Now() slApp = sl.appender(context.Background()) total, added, seriesAdded, err = sl.append(slApp, []byte("metric_a 1\nmetric_b 1\nmetric_c{deleteme=\"yes\"} 1\nmetric_d 1\nmetric_e 1\nmetric_f 1\nmetric_g 1\nmetric_h{deleteme=\"yes\"} 1\nmetric_i{deleteme=\"yes\"} 1\n"), "", now) - if err != errSampleLimit { + if !errors.Is(err, errSampleLimit) { t.Fatalf("Did not see expected sample limit error: %s", err) } require.NoError(t, slApp.Rollback()) @@ -1868,7 +1868,7 @@ func 
TestScrapeLoop_HistogramBucketLimit(t *testing.T) { now = time.Now() total, added, seriesAdded, err = sl.append(app, msg, "application/vnd.google.protobuf", now) - if err != errBucketLimit { + if !errors.Is(err, errBucketLimit) { t.Fatalf("Did not see expected histogram bucket limit error: %s", err) } require.NoError(t, app.Rollback()) @@ -2738,8 +2738,8 @@ func TestTargetScrapeScrapeCancel(t *testing.T) { switch { case err == nil: errc <- errors.New("Expected error but got nil") - case ctx.Err() != context.Canceled: - errc <- errors.Errorf("Expected context cancellation error but got: %s", ctx.Err()) + case !errors.Is(ctx.Err(), context.Canceled): + errc <- fmt.Errorf("Expected context cancellation error but got: %w", ctx.Err()) default: close(errc) } From f9d060c65f2f0f550af892281988ec0d1347de69 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Nov 2023 23:30:13 +0000 Subject: [PATCH 525/632] build(deps): bump github.com/prometheus/common Bumps [github.com/prometheus/common](https://github.com/prometheus/common) from 0.44.0 to 0.45.0. - [Release notes](https://github.com/prometheus/common/releases) - [Commits](https://github.com/prometheus/common/compare/v0.44.0...v0.45.0) --- updated-dependencies: - dependency-name: github.com/prometheus/common dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 14 ++++----- documentation/examples/remote_storage/go.sum | 30 ++++++++++---------- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 837f6a6c7..1014b0aad 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -9,7 +9,7 @@ require ( github.com/golang/snappy v0.0.4 github.com/influxdata/influxdb v1.11.2 github.com/prometheus/client_golang v1.17.0 - github.com/prometheus/common v0.44.0 + github.com/prometheus/common v0.45.0 github.com/prometheus/prometheus v0.47.2 github.com/stretchr/testify v1.8.4 ) @@ -39,7 +39,7 @@ require ( github.com/klauspost/compress v1.16.7 // indirect github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect @@ -58,12 +58,12 @@ require ( go.opentelemetry.io/otel/trace v1.16.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.11.0 // indirect + golang.org/x/crypto v0.14.0 // indirect golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect - golang.org/x/net v0.12.0 // indirect - golang.org/x/oauth2 v0.10.0 // indirect - golang.org/x/sys v0.11.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.12.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect 
google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 // indirect diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 398fc3344..64ced7423 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -167,8 +167,8 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -211,8 +211,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/common v0.45.0 
h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -266,8 +266,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -290,12 +290,12 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= -golang.org/x/net v0.12.0/go.mod 
h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= +golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -323,20 +323,20 @@ golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From b9f75ceeddb18f9a375f743c98996c2e9154ad2b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Nov 2023 23:32:04 +0000 Subject: [PATCH 526/632] build(deps): bump github/codeql-action from 2.21.9 to 2.22.5 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 2.21.9 to 2.22.5. 
- [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/ddccb873888234080b77e9bc2d4764d5ccaaccf9...74483a38d39275f33fcff5f35b679b5ca4a26a99) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql-analysis.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index d79f2b9d4..d98233757 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -30,12 +30,12 @@ jobs: go-version: '>=1.21 <1.22' - name: Initialize CodeQL - uses: github/codeql-action/init@ddccb873888234080b77e9bc2d4764d5ccaaccf9 # v2.21.9 + uses: github/codeql-action/init@74483a38d39275f33fcff5f35b679b5ca4a26a99 # v2.22.5 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@ddccb873888234080b77e9bc2d4764d5ccaaccf9 # v2.21.9 + uses: github/codeql-action/autobuild@74483a38d39275f33fcff5f35b679b5ca4a26a99 # v2.22.5 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@ddccb873888234080b77e9bc2d4764d5ccaaccf9 # v2.21.9 + uses: github/codeql-action/analyze@74483a38d39275f33fcff5f35b679b5ca4a26a99 # v2.22.5 diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 6249ba623..48278dae7 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -45,6 +45,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@ddccb873888234080b77e9bc2d4764d5ccaaccf9 # tag=v2.21.9 + uses: github/codeql-action/upload-sarif@74483a38d39275f33fcff5f35b679b5ca4a26a99 # tag=v2.22.5 with: sarif_file: results.sarif From b4448e0ef252f27c436e9157c07e4d54021c3d72 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Nov 2023 23:32:08 +0000 Subject: [PATCH 527/632] build(deps): bump actions/checkout from 4.1.0 to 4.1.1 Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.0 to 4.1.1. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/8ade135a41bc03ea155e62e844d188df1ea18608...b4ffde65f46336ab88eb53be808477a3936bae11) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/buf-lint.yml | 2 +- .github/workflows/buf.yml | 2 +- .github/workflows/ci.yml | 22 +++++++++++----------- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/repo_sync.yml | 2 +- .github/workflows/scorecards.yml | 2 +- 6 files changed, 16 insertions(+), 16 deletions(-) diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index b44ba0511..85109b39a 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -12,7 +12,7 @@ jobs: name: lint runs-on: ubuntu-latest steps: - - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - uses: bufbuild/buf-setup-action@eb60cd0de4f14f1f57cf346916b8cd69a9e7ed0b # v1.26.1 with: github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index 58c1bc198..c2c9dc070 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest if: github.repository_owner == 'prometheus' steps: - - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - uses: bufbuild/buf-setup-action@eb60cd0de4f14f1f57cf346916b8cd69a9e7ed0b # v1.26.1 with: github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 08fbb0a33..1f574ca6d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,7 +13,7 @@ jobs: container: image: quay.io/prometheus/golang-builder:1.21-base steps: - - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 - uses: ./.github/promci/actions/setup_environment - run: make GO_ONLY=1 SKIP_GOLANGCI_LINT=1 
@@ -35,7 +35,7 @@ jobs: image: quay.io/prometheus/golang-builder:1.21-base steps: - - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 - uses: ./.github/promci/actions/setup_environment with: @@ -52,7 +52,7 @@ jobs: name: Go tests on Windows runs-on: windows-latest steps: - - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0 with: go-version: '>=1.21 <1.22' @@ -68,7 +68,7 @@ jobs: container: image: quay.io/prometheus/golang-builder:1.20-base steps: - - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - run: make build - run: go test ./tsdb/... - run: go test ./tsdb/ -test.tsdb-isolation=false @@ -81,7 +81,7 @@ jobs: container: image: quay.io/prometheus/golang-builder:1.20-base steps: - - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - run: go install ./cmd/promtool/. - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest @@ -104,7 +104,7 @@ jobs: matrix: thread: [ 0, 1, 2 ] steps: - - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 - uses: ./.github/promci/actions/build with: @@ -127,7 +127,7 @@ jobs: # Whenever the Go version is updated here, .promu.yml # should also be updated. 
steps: - - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 - uses: ./.github/promci/actions/build with: @@ -138,7 +138,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Install Go uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0 with: @@ -164,7 +164,7 @@ jobs: needs: [test_ui, test_go, test_windows, golangci, codeql, build_all] if: github.event_name == 'push' && github.event.ref == 'refs/heads/main' steps: - - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 - uses: ./.github/promci/actions/publish_main with: @@ -178,7 +178,7 @@ jobs: needs: [test_ui, test_go, test_windows, golangci, codeql, build_all] if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.') steps: - - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 - uses: ./.github/promci/actions/publish_release with: @@ -193,7 +193,7 @@ jobs: needs: [test_ui, codeql] steps: - name: Checkout - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 - name: Install nodejs uses: actions/setup-node@5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d # v3.8.1 diff --git a/.github/workflows/codeql-analysis.yml 
b/.github/workflows/codeql-analysis.yml index d79f2b9d4..ead87d42f 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -24,7 +24,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0 with: go-version: '>=1.21 <1.22' diff --git a/.github/workflows/repo_sync.yml b/.github/workflows/repo_sync.yml index 368b98828..1cf2eee24 100644 --- a/.github/workflows/repo_sync.yml +++ b/.github/workflows/repo_sync.yml @@ -13,7 +13,7 @@ jobs: container: image: quay.io/prometheus/golang-builder steps: - - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - run: ./scripts/sync_repo_files.sh env: GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }} diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 6249ba623..d473339c5 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -21,7 +21,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # tag=v4.1.0 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # tag=v4.1.1 with: persist-credentials: false From 1c272a1f4fb5c78decbb1c2b2513594942a7f2a4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Nov 2023 23:32:14 +0000 Subject: [PATCH 528/632] build(deps): bump ossf/scorecard-action from 2.2.0 to 2.3.1 Bumps [ossf/scorecard-action](https://github.com/ossf/scorecard-action) from 2.2.0 to 2.3.1. 
- [Release notes](https://github.com/ossf/scorecard-action/releases) - [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md) - [Commits](https://github.com/ossf/scorecard-action/compare/08b4669551908b1024bb425080c797723083c031...0864cf19026789058feabb7e87baa5f140aac736) --- updated-dependencies: - dependency-name: ossf/scorecard-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/scorecards.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 6249ba623..1bc6a436f 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -26,7 +26,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@08b4669551908b1024bb425080c797723083c031 # tag=v2.2.0 + uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # tag=v2.3.1 with: results_file: results.sarif results_format: sarif From ae2b00d77d5b8c949318fcfde91355fb6de86f00 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Nov 2023 23:43:56 +0000 Subject: [PATCH 529/632] build(deps): bump the k8s-io group with 2 updates Bumps the k8s-io group with 2 updates: [k8s.io/api](https://github.com/kubernetes/api) and [k8s.io/client-go](https://github.com/kubernetes/client-go). 
Updates `k8s.io/api` from 0.28.2 to 0.28.3 - [Commits](https://github.com/kubernetes/api/compare/v0.28.2...v0.28.3) Updates `k8s.io/client-go` from 0.28.2 to 0.28.3 - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.28.2...v0.28.3) --- updated-dependencies: - dependency-name: k8s.io/api dependency-type: direct:production update-type: version-update:semver-patch dependency-group: k8s-io - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-patch dependency-group: k8s-io ... Signed-off-by: dependabot[bot] --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 7adaacfd7..00df80701 100644 --- a/go.mod +++ b/go.mod @@ -81,9 +81,9 @@ require ( google.golang.org/protobuf v1.31.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.28.2 - k8s.io/apimachinery v0.28.2 - k8s.io/client-go v0.28.2 + k8s.io/api v0.28.3 + k8s.io/apimachinery v0.28.3 + k8s.io/client-go v0.28.3 k8s.io/klog v1.0.0 k8s.io/klog/v2 v2.100.1 ) diff --git a/go.sum b/go.sum index 1a08b123c..bba0f8f02 100644 --- a/go.sum +++ b/go.sum @@ -1208,12 +1208,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw= -k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg= -k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ= -k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU= -k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY= 
-k8s.io/client-go v0.28.2/go.mod h1:sMkApowspLuc7omj1FOSUxSoqjr+d5Q0Yc0LOFnYFJY= +k8s.io/api v0.28.3 h1:Gj1HtbSdB4P08C8rs9AR94MfSGpRhJgsS+GF9V26xMM= +k8s.io/api v0.28.3/go.mod h1:MRCV/jr1dW87/qJnZ57U5Pak65LGmQVkKTzf3AtKFHc= +k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A= +k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8= +k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4= +k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= From 1103569841c8bcd818a265e6c7d95360880f6334 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Nov 2023 23:44:15 +0000 Subject: [PATCH 530/632] build(deps): bump the go-opentelemetry-io group with 2 updates Bumps the go-opentelemetry-io group with 2 updates: [go.opentelemetry.io/collector/pdata](https://github.com/open-telemetry/opentelemetry-collector) and [go.opentelemetry.io/collector/semconv](https://github.com/open-telemetry/opentelemetry-collector). 
Updates `go.opentelemetry.io/collector/pdata` from 1.0.0-rcv0016 to 1.0.0-rcv0017 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/pdata/v1.0.0-rcv0016...pdata/v1.0.0-rcv0017) Updates `go.opentelemetry.io/collector/semconv` from 0.87.0 to 0.88.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.87.0...v0.88.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/collector/pdata dependency-type: direct:production update-type: version-update:semver-patch dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/collector/semconv dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io ... 
Signed-off-by: dependabot[bot] --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 7adaacfd7..7d9ed9c1f 100644 --- a/go.mod +++ b/go.mod @@ -55,8 +55,8 @@ require ( github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.8.4 github.com/vultr/govultr/v2 v2.17.2 - go.opentelemetry.io/collector/pdata v1.0.0-rcv0016 - go.opentelemetry.io/collector/semconv v0.87.0 + go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 + go.opentelemetry.io/collector/semconv v0.88.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 go.opentelemetry.io/otel v1.19.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 @@ -77,7 +77,7 @@ require ( golang.org/x/tools v0.14.0 google.golang.org/api v0.147.0 google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a - google.golang.org/grpc v1.58.3 + google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.31.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -136,7 +136,7 @@ require ( github.com/go-openapi/swag v0.22.4 // indirect github.com/go-openapi/validate v0.22.1 // indirect github.com/go-resty/resty/v2 v2.7.0 // indirect - github.com/golang/glog v1.1.0 // indirect + github.com/golang/glog v1.1.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-cmp v0.6.0 // indirect diff --git a/go.sum b/go.sum index 1a08b123c..265ed3572 100644 --- a/go.sum +++ b/go.sum @@ -277,8 +277,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.0 
h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= -github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -760,10 +760,10 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0016 h1:qCPXSQCoD3qeWFb1RuIks8fw9Atxpk78bmtVdi15KhE= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0016/go.mod h1:OdN0alYOlYhHXu6BDlGehrZWgtBuiDsz/rlNeJeXiNg= -go.opentelemetry.io/collector/semconv v0.87.0 h1:BsG1jdLLRCBRlvUujk4QA86af7r/ZXnizczQpEs/gg8= -go.opentelemetry.io/collector/semconv v0.87.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 h1:AgALhc2VenoA5l1DvTdg7mkzaBGqoTSuMkAtjsttBFo= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0017/go.mod h1:Rv9fOclA5AtM/JGm0d4jBOIAo1+jBA13UT5Bx0ovXi4= +go.opentelemetry.io/collector/semconv v0.88.0 h1:8TVP4hYaUC87S6CCLKNoSxsUE0ChldE4vqotvNHHUnE= +go.opentelemetry.io/collector/semconv v0.88.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod 
h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= @@ -1147,8 +1147,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= -google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 2d6f27f10a6e44ddcc110043f17e89182a92458a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Nov 2023 23:55:51 +0000 Subject: [PATCH 531/632] build(deps): bump actions/checkout from 4.1.0 to 4.1.1 in /scripts Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.0 to 4.1.1. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/8ade135a41bc03ea155e62e844d188df1ea18608...b4ffde65f46336ab88eb53be808477a3936bae11) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- scripts/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index 15cf547be..babd8a0c4 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -18,7 +18,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: install Go uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0 with: From 8b87f3bbf1f9f957fcdc7618cd19ea707a136f21 Mon Sep 17 00:00:00 2001 From: Levi Harrison Date: Wed, 1 Nov 2023 21:19:24 -0400 Subject: [PATCH 532/632] Release 2.48.0-rc.2 Signed-off-by: Levi Harrison --- CHANGELOG.md | 6 ++++++ VERSION | 2 +- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 18 +++++++++--------- web/ui/package.json | 2 +- web/ui/react-app/package.json | 4 ++-- 7 files changed, 22 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bc14091cd..35f9f7768 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 2.48.0-rc.2 / 2023-11-02 + +* [ENHANCEMENT] Scraping: Add configuration option for tracking staleness of scraped timestamps. #13060 +* [BUGFIX] Storage: Fix crash caused by incorrect mixed samples handling. #13055 +* [BUGFIX] TSDB: Fix compactor failures by adding min time to histogram chunks. #13062 + ## 2.48.0-rc.1 / 2023-10-24 * [BUGFIX] PromQL: Reduce inefficiency introduced by warnings/annotations and temporarily remove possible non-counter warnings. 
#13012 diff --git a/VERSION b/VERSION index f2bfe03db..10429c7ad 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.48.0-rc.1 +2.48.0-rc.2 diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 0e62d640a..408891028 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.48.0-rc.1", + "version": "0.48.0-rc.2", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.48.0-rc.1", + "@prometheus-io/lezer-promql": "0.48.0-rc.2", "lru-cache": "^7.18.3" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index d38b3288d..9f4a19d00 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.48.0-rc.1", + "version": "0.48.0-rc.2", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index f951aba10..43cda8b13 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.48.0-rc.1", + "version": "0.48.0-rc.2", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.48.0-rc.1", + "version": "0.48.0-rc.2", "workspaces": [ "react-app", "module/*" @@ -30,10 +30,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.48.0-rc.1", + "version": "0.48.0-rc.2", "license": "Apache-2.0", "dependencies": { - 
"@prometheus-io/lezer-promql": "0.48.0-rc.1", + "@prometheus-io/lezer-promql": "0.48.0-rc.2", "lru-cache": "^7.18.3" }, "devDependencies": { @@ -70,7 +70,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.48.0-rc.1", + "version": "0.48.0-rc.2", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.2.3", @@ -20764,7 +20764,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.48.0-rc.1", + "version": "0.48.0-rc.2", "dependencies": { "@codemirror/autocomplete": "^6.7.1", "@codemirror/commands": "^6.2.4", @@ -20782,7 +20782,7 @@ "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.48.0-rc.1", + "@prometheus-io/codemirror-promql": "0.48.0-rc.2", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.0", @@ -23422,7 +23422,7 @@ "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.48.0-rc.1", + "@prometheus-io/codemirror-promql": "0.48.0-rc.2", "@testing-library/react-hooks": "^7.0.2", "@types/enzyme": "^3.10.13", "@types/flot": "0.0.32", @@ -23486,7 +23486,7 @@ "@lezer/common": "^1.0.3", "@lezer/highlight": "^1.1.6", "@lezer/lr": "^1.3.6", - "@prometheus-io/lezer-promql": "0.48.0-rc.1", + "@prometheus-io/lezer-promql": "0.48.0-rc.2", "isomorphic-fetch": "^3.0.0", "lru-cache": "^7.18.3", "nock": "^13.3.1" diff --git a/web/ui/package.json b/web/ui/package.json index 76b5ad2a8..9b4dfbc62 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -28,5 +28,5 @@ "ts-jest": "^29.1.0", "typescript": "^4.9.5" }, - "version": "0.48.0-rc.1" + "version": "0.48.0-rc.2" } diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 3d1de883a..de0d0e315 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.48.0-rc.1", + "version": "0.48.0-rc.2", "private": 
true, "dependencies": { "@codemirror/autocomplete": "^6.7.1", @@ -19,7 +19,7 @@ "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.48.0-rc.1", + "@prometheus-io/codemirror-promql": "0.48.0-rc.2", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.0", From 8274e248addacbca809f78012d333adba8c1a8dd Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Thu, 2 Nov 2023 15:29:09 +1100 Subject: [PATCH 533/632] Fix issue where `concatenatingChunkIterator` can obscure errors. Signed-off-by: Charles Korn --- storage/merge.go | 3 +++ storage/merge_test.go | 59 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+) diff --git a/storage/merge.go b/storage/merge.go index 50ae88ce0..501e8db09 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -860,6 +860,9 @@ func (c *concatenatingChunkIterator) Next() bool { c.curr = c.iterators[c.idx].At() return true } + if c.iterators[c.idx].Err() != nil { + return false + } c.idx++ return c.Next() } diff --git a/storage/merge_test.go b/storage/merge_test.go index f68261d27..25c8fa4a8 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -853,6 +853,65 @@ func TestConcatenatingChunkSeriesMerger(t *testing.T) { } } +func TestConcatenatingChunkIterator(t *testing.T) { + chunk1, err := chunks.ChunkFromSamples([]chunks.Sample{fSample{t: 1, f: 10}}) + require.NoError(t, err) + chunk2, err := chunks.ChunkFromSamples([]chunks.Sample{fSample{t: 2, f: 20}}) + require.NoError(t, err) + chunk3, err := chunks.ChunkFromSamples([]chunks.Sample{fSample{t: 3, f: 30}}) + require.NoError(t, err) + + testError := errors.New("something went wrong") + + testCases := map[string]struct { + iterators []chunks.Iterator + expectedChunks []chunks.Meta + expectedError error + }{ + "many successful iterators": { + iterators: []chunks.Iterator{ + NewListChunkSeriesIterator(chunk1, chunk2), + NewListChunkSeriesIterator(chunk3), + }, + expectedChunks: 
[]chunks.Meta{chunk1, chunk2, chunk3}, + }, + "single failing iterator": { + iterators: []chunks.Iterator{ + errChunksIterator{err: testError}, + }, + expectedError: testError, + }, + "some failing and some successful iterators": { + iterators: []chunks.Iterator{ + NewListChunkSeriesIterator(chunk1, chunk2), + errChunksIterator{err: testError}, + NewListChunkSeriesIterator(chunk3), + }, + expectedChunks: []chunks.Meta{chunk1, chunk2}, // Should stop before advancing to last iterator. + expectedError: testError, + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + it := concatenatingChunkIterator{iterators: testCase.iterators} + var chks []chunks.Meta + + for it.Next() { + chks = append(chks, it.At()) + } + + require.Equal(t, testCase.expectedChunks, chks) + + if testCase.expectedError == nil { + require.NoError(t, it.Err()) + } else { + require.EqualError(t, it.Err(), testCase.expectedError.Error()) + } + }) + } +} + type mockQuerier struct { LabelQuerier From 46be85f2dc054920bcf0fa7718de099436ee8491 Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Wed, 1 Nov 2023 19:53:41 +0800 Subject: [PATCH 534/632] Make TestPopulateWithDelSeriesIterator tests cover histogram types and check MinTime Signed-off-by: Jeanette Tan --- tsdb/querier_test.go | 237 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 195 insertions(+), 42 deletions(-) diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index fc6c68801..096e0ff72 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -1160,65 +1160,218 @@ func rmChunkRefs(chks []chunks.Meta) { // Regression for: https://github.com/prometheus/tsdb/pull/97 func TestPopulateWithDelSeriesIterator_DoubleSeek(t *testing.T) { - f, chkMetas := createFakeReaderAndNotPopulatedChunks( - []chunks.Sample{}, - []chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, - []chunks.Sample{sample{4, 4, nil, nil}, sample{5, 5, nil, nil}}, - ) - - it := 
&populateWithDelSeriesIterator{} - it.reset(ulid.ULID{}, f, chkMetas, nil) - require.Equal(t, chunkenc.ValFloat, it.Seek(1)) - require.Equal(t, chunkenc.ValFloat, it.Seek(2)) - require.Equal(t, chunkenc.ValFloat, it.Seek(2)) - ts, v := it.At() - require.Equal(t, int64(2), ts) - require.Equal(t, float64(2), v) + t.Run("float", func(t *testing.T) { + valType := chunkenc.ValFloat + f, chkMetas := createFakeReaderAndNotPopulatedChunks( + []chunks.Sample{}, + []chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, + []chunks.Sample{sample{4, 4, nil, nil}, sample{5, 5, nil, nil}}, + ) + it := &populateWithDelSeriesIterator{} + it.reset(ulid.ULID{}, f, chkMetas, nil) + require.Equal(t, valType, it.Seek(1)) + require.Equal(t, valType, it.Seek(2)) + require.Equal(t, valType, it.Seek(2)) + ts, v := it.At() + require.Equal(t, int64(2), ts) + require.Equal(t, float64(2), v) + require.Equal(t, int64(0), chkMetas[0].MinTime) + require.Equal(t, int64(1), chkMetas[1].MinTime) + require.Equal(t, int64(4), chkMetas[2].MinTime) + }) + t.Run("histogram", func(t *testing.T) { + valType := chunkenc.ValHistogram + f, chkMetas := createFakeReaderAndNotPopulatedChunks( + []chunks.Sample{}, + []chunks.Sample{sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil}, sample{3, 0, tsdbutil.GenerateTestHistogram(3), nil}}, + []chunks.Sample{sample{4, 0, tsdbutil.GenerateTestHistogram(4), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(5), nil}}, + ) + it := &populateWithDelSeriesIterator{} + it.reset(ulid.ULID{}, f, chkMetas, nil) + require.Equal(t, valType, it.Seek(1)) + require.Equal(t, valType, it.Seek(2)) + require.Equal(t, valType, it.Seek(2)) + ts, h := it.AtHistogram() + require.Equal(t, int64(2), ts) + h.CounterResetHint = histogram.UnknownCounterReset + require.Equal(t, tsdbutil.GenerateTestHistogram(2), h) + require.Equal(t, int64(0), chkMetas[0].MinTime) + require.Equal(t, int64(1), 
chkMetas[1].MinTime) + require.Equal(t, int64(4), chkMetas[2].MinTime) + }) + t.Run("float histogram", func(t *testing.T) { + valType := chunkenc.ValFloatHistogram + f, chkMetas := createFakeReaderAndNotPopulatedChunks( + []chunks.Sample{}, + []chunks.Sample{sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)}}, + []chunks.Sample{sample{4, 0, nil, tsdbutil.GenerateTestFloatHistogram(4)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)}}, + ) + it := &populateWithDelSeriesIterator{} + it.reset(ulid.ULID{}, f, chkMetas, nil) + require.Equal(t, valType, it.Seek(1)) + require.Equal(t, valType, it.Seek(2)) + require.Equal(t, valType, it.Seek(2)) + ts, h := it.AtFloatHistogram() + require.Equal(t, int64(2), ts) + h.CounterResetHint = histogram.UnknownCounterReset + require.Equal(t, tsdbutil.GenerateTestFloatHistogram(2), h) + require.Equal(t, int64(0), chkMetas[0].MinTime) + require.Equal(t, int64(1), chkMetas[1].MinTime) + require.Equal(t, int64(4), chkMetas[2].MinTime) + }) } // Regression when seeked chunks were still found via binary search and we always // skipped to the end when seeking a value in the current chunk. 
func TestPopulateWithDelSeriesIterator_SeekInCurrentChunk(t *testing.T) { - f, chkMetas := createFakeReaderAndNotPopulatedChunks( - []chunks.Sample{}, - []chunks.Sample{sample{1, 2, nil, nil}, sample{3, 4, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}}, - []chunks.Sample{}, - ) + t.Run("float", func(t *testing.T) { + valType := chunkenc.ValFloat + f, chkMetas := createFakeReaderAndNotPopulatedChunks( + []chunks.Sample{}, + []chunks.Sample{sample{1, 2, nil, nil}, sample{3, 4, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}}, + []chunks.Sample{}, + ) - it := &populateWithDelSeriesIterator{} - it.reset(ulid.ULID{}, f, chkMetas, nil) - require.Equal(t, chunkenc.ValFloat, it.Next()) - ts, v := it.At() - require.Equal(t, int64(1), ts) - require.Equal(t, float64(2), v) + it := &populateWithDelSeriesIterator{} + it.reset(ulid.ULID{}, f, chkMetas, nil) + require.Equal(t, valType, it.Next()) + ts, v := it.At() + require.Equal(t, int64(1), ts) + require.Equal(t, float64(2), v) - require.Equal(t, chunkenc.ValFloat, it.Seek(4)) - ts, v = it.At() - require.Equal(t, int64(5), ts) - require.Equal(t, float64(6), v) + require.Equal(t, valType, it.Seek(4)) + ts, v = it.At() + require.Equal(t, int64(5), ts) + require.Equal(t, float64(6), v) + + require.Equal(t, int64(0), chkMetas[0].MinTime) + require.Equal(t, int64(1), chkMetas[1].MinTime) + require.Equal(t, int64(0), chkMetas[2].MinTime) + }) + t.Run("histogram", func(t *testing.T) { + valType := chunkenc.ValHistogram + f, chkMetas := createFakeReaderAndNotPopulatedChunks( + []chunks.Sample{}, + []chunks.Sample{sample{1, 0, tsdbutil.GenerateTestHistogram(2), nil}, sample{3, 0, tsdbutil.GenerateTestHistogram(4), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil}}, + []chunks.Sample{}, + ) + + it := &populateWithDelSeriesIterator{} + it.reset(ulid.ULID{}, f, chkMetas, nil) + require.Equal(t, valType, it.Next()) + ts, h := it.AtHistogram() + 
require.Equal(t, int64(1), ts) + require.Equal(t, tsdbutil.GenerateTestHistogram(2), h) + + require.Equal(t, valType, it.Seek(4)) + ts, h = it.AtHistogram() + require.Equal(t, int64(5), ts) + h.CounterResetHint = histogram.UnknownCounterReset + require.Equal(t, tsdbutil.GenerateTestHistogram(6), h) + + require.Equal(t, int64(0), chkMetas[0].MinTime) + require.Equal(t, int64(1), chkMetas[1].MinTime) + require.Equal(t, int64(0), chkMetas[2].MinTime) + }) + t.Run("float histogram", func(t *testing.T) { + valType := chunkenc.ValFloatHistogram + f, chkMetas := createFakeReaderAndNotPopulatedChunks( + []chunks.Sample{}, + []chunks.Sample{sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(4)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}}, + []chunks.Sample{}, + ) + + it := &populateWithDelSeriesIterator{} + it.reset(ulid.ULID{}, f, chkMetas, nil) + require.Equal(t, valType, it.Next()) + ts, h := it.AtFloatHistogram() + require.Equal(t, int64(1), ts) + require.Equal(t, tsdbutil.GenerateTestFloatHistogram(2), h) + + require.Equal(t, valType, it.Seek(4)) + ts, h = it.AtFloatHistogram() + require.Equal(t, int64(5), ts) + h.CounterResetHint = histogram.UnknownCounterReset + require.Equal(t, tsdbutil.GenerateTestFloatHistogram(6), h) + + require.Equal(t, int64(0), chkMetas[0].MinTime) + require.Equal(t, int64(1), chkMetas[1].MinTime) + require.Equal(t, int64(0), chkMetas[2].MinTime) + }) } func TestPopulateWithDelSeriesIterator_SeekWithMinTime(t *testing.T) { - f, chkMetas := createFakeReaderAndNotPopulatedChunks( - []chunks.Sample{sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{6, 8, nil, nil}}, - ) + t.Run("float", func(t *testing.T) { + valType := chunkenc.ValFloat + f, chkMetas := createFakeReaderAndNotPopulatedChunks( + []chunks.Sample{sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{6, 8, nil, nil}}, + ) - it := 
&populateWithDelSeriesIterator{} - it.reset(ulid.ULID{}, f, chkMetas, nil) - require.Equal(t, chunkenc.ValNone, it.Seek(7)) - require.Equal(t, chunkenc.ValFloat, it.Seek(3)) + it := &populateWithDelSeriesIterator{} + it.reset(ulid.ULID{}, f, chkMetas, nil) + require.Equal(t, chunkenc.ValNone, it.Seek(7)) + require.Equal(t, valType, it.Seek(3)) + require.Equal(t, int64(1), chkMetas[0].MinTime) + }) + t.Run("histogram", func(t *testing.T) { + valType := chunkenc.ValHistogram + f, chkMetas := createFakeReaderAndNotPopulatedChunks( + []chunks.Sample{sample{1, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{6, 0, tsdbutil.GenerateTestHistogram(8), nil}}, + ) + + it := &populateWithDelSeriesIterator{} + it.reset(ulid.ULID{}, f, chkMetas, nil) + require.Equal(t, chunkenc.ValNone, it.Seek(7)) + require.Equal(t, valType, it.Seek(3)) + require.Equal(t, int64(1), chkMetas[0].MinTime) + }) + t.Run("float histogram", func(t *testing.T) { + valType := chunkenc.ValFloatHistogram + f, chkMetas := createFakeReaderAndNotPopulatedChunks( + []chunks.Sample{sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}}, + ) + + it := &populateWithDelSeriesIterator{} + it.reset(ulid.ULID{}, f, chkMetas, nil) + require.Equal(t, chunkenc.ValNone, it.Seek(7)) + require.Equal(t, valType, it.Seek(3)) + require.Equal(t, int64(1), chkMetas[0].MinTime) + }) } // Regression when calling Next() with a time bounded to fit within two samples. // Seek gets called and advances beyond the max time, which was just accepted as a valid sample. 
func TestPopulateWithDelSeriesIterator_NextWithMinTime(t *testing.T) { - f, chkMetas := createFakeReaderAndNotPopulatedChunks( - []chunks.Sample{sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}}, - ) + t.Run("float", func(t *testing.T) { + f, chkMetas := createFakeReaderAndNotPopulatedChunks( + []chunks.Sample{sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}}, + ) - it := &populateWithDelSeriesIterator{} - it.reset(ulid.ULID{}, f, chkMetas, tombstones.Intervals{{Mint: math.MinInt64, Maxt: 2}}.Add(tombstones.Interval{Mint: 4, Maxt: math.MaxInt64})) - require.Equal(t, chunkenc.ValNone, it.Next()) + it := &populateWithDelSeriesIterator{} + it.reset(ulid.ULID{}, f, chkMetas, tombstones.Intervals{{Mint: math.MinInt64, Maxt: 2}}.Add(tombstones.Interval{Mint: 4, Maxt: math.MaxInt64})) + require.Equal(t, chunkenc.ValNone, it.Next()) + require.Equal(t, int64(1), chkMetas[0].MinTime) + }) + t.Run("histogram", func(t *testing.T) { + f, chkMetas := createFakeReaderAndNotPopulatedChunks( + []chunks.Sample{sample{1, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil}}, + ) + + it := &populateWithDelSeriesIterator{} + it.reset(ulid.ULID{}, f, chkMetas, tombstones.Intervals{{Mint: math.MinInt64, Maxt: 2}}.Add(tombstones.Interval{Mint: 4, Maxt: math.MaxInt64})) + require.Equal(t, chunkenc.ValNone, it.Next()) + require.Equal(t, int64(1), chkMetas[0].MinTime) + }) + t.Run("float histogram", func(t *testing.T) { + f, chkMetas := createFakeReaderAndNotPopulatedChunks( + []chunks.Sample{sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}}, + ) + + it := &populateWithDelSeriesIterator{} + it.reset(ulid.ULID{}, f, chkMetas, tombstones.Intervals{{Mint: math.MinInt64, Maxt: 2}}.Add(tombstones.Interval{Mint: 4, Maxt: 
math.MaxInt64})) + require.Equal(t, chunkenc.ValNone, it.Next()) + require.Equal(t, int64(1), chkMetas[0].MinTime) + }) } // Test the cost of merging series sets for different number of merged sets and their size. From 04aabdd7cc86fedba3dd2e614fa5289b746bda28 Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Wed, 1 Nov 2023 19:53:41 +0800 Subject: [PATCH 535/632] Refactor TestPopulateWithDelSeriesIterator unit tests to reuse more code Signed-off-by: Jeanette Tan --- tsdb/querier_test.go | 385 +++++++++++++++++++++---------------------- 1 file changed, 189 insertions(+), 196 deletions(-) diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 096e0ff72..1c50724d7 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -1158,220 +1158,213 @@ func rmChunkRefs(chks []chunks.Meta) { } } +func checkCurrVal(t *testing.T, valType chunkenc.ValueType, it *populateWithDelSeriesIterator, expectedTs, expectedValue int) { + switch valType { + case chunkenc.ValFloat: + ts, v := it.At() + require.Equal(t, int64(expectedTs), ts) + require.Equal(t, float64(expectedValue), v) + case chunkenc.ValHistogram: + ts, h := it.AtHistogram() + require.Equal(t, int64(expectedTs), ts) + h.CounterResetHint = histogram.UnknownCounterReset + require.Equal(t, tsdbutil.GenerateTestHistogram(expectedValue), h) + case chunkenc.ValFloatHistogram: + ts, h := it.AtFloatHistogram() + require.Equal(t, int64(expectedTs), ts) + h.CounterResetHint = histogram.UnknownCounterReset + require.Equal(t, tsdbutil.GenerateTestFloatHistogram(expectedValue), h) + default: + panic("unexpected value type") + } +} + // Regression for: https://github.com/prometheus/tsdb/pull/97 func TestPopulateWithDelSeriesIterator_DoubleSeek(t *testing.T) { - t.Run("float", func(t *testing.T) { - valType := chunkenc.ValFloat - f, chkMetas := createFakeReaderAndNotPopulatedChunks( - []chunks.Sample{}, - []chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, - []chunks.Sample{sample{4, 4, 
nil, nil}, sample{5, 5, nil, nil}}, - ) - it := &populateWithDelSeriesIterator{} - it.reset(ulid.ULID{}, f, chkMetas, nil) - require.Equal(t, valType, it.Seek(1)) - require.Equal(t, valType, it.Seek(2)) - require.Equal(t, valType, it.Seek(2)) - ts, v := it.At() - require.Equal(t, int64(2), ts) - require.Equal(t, float64(2), v) - require.Equal(t, int64(0), chkMetas[0].MinTime) - require.Equal(t, int64(1), chkMetas[1].MinTime) - require.Equal(t, int64(4), chkMetas[2].MinTime) - }) - t.Run("histogram", func(t *testing.T) { - valType := chunkenc.ValHistogram - f, chkMetas := createFakeReaderAndNotPopulatedChunks( - []chunks.Sample{}, - []chunks.Sample{sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil}, sample{3, 0, tsdbutil.GenerateTestHistogram(3), nil}}, - []chunks.Sample{sample{4, 0, tsdbutil.GenerateTestHistogram(4), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(5), nil}}, - ) - it := &populateWithDelSeriesIterator{} - it.reset(ulid.ULID{}, f, chkMetas, nil) - require.Equal(t, valType, it.Seek(1)) - require.Equal(t, valType, it.Seek(2)) - require.Equal(t, valType, it.Seek(2)) - ts, h := it.AtHistogram() - require.Equal(t, int64(2), ts) - h.CounterResetHint = histogram.UnknownCounterReset - require.Equal(t, tsdbutil.GenerateTestHistogram(2), h) - require.Equal(t, int64(0), chkMetas[0].MinTime) - require.Equal(t, int64(1), chkMetas[1].MinTime) - require.Equal(t, int64(4), chkMetas[2].MinTime) - }) - t.Run("float histogram", func(t *testing.T) { - valType := chunkenc.ValFloatHistogram - f, chkMetas := createFakeReaderAndNotPopulatedChunks( - []chunks.Sample{}, - []chunks.Sample{sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)}}, - []chunks.Sample{sample{4, 0, nil, tsdbutil.GenerateTestFloatHistogram(4)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)}}, - ) - it := 
&populateWithDelSeriesIterator{} - it.reset(ulid.ULID{}, f, chkMetas, nil) - require.Equal(t, valType, it.Seek(1)) - require.Equal(t, valType, it.Seek(2)) - require.Equal(t, valType, it.Seek(2)) - ts, h := it.AtFloatHistogram() - require.Equal(t, int64(2), ts) - h.CounterResetHint = histogram.UnknownCounterReset - require.Equal(t, tsdbutil.GenerateTestFloatHistogram(2), h) - require.Equal(t, int64(0), chkMetas[0].MinTime) - require.Equal(t, int64(1), chkMetas[1].MinTime) - require.Equal(t, int64(4), chkMetas[2].MinTime) - }) + cases := []struct { + name string + valType chunkenc.ValueType + chks [][]chunks.Sample + }{ + { + name: "float", + valType: chunkenc.ValFloat, + chks: [][]chunks.Sample{ + {}, + {sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, + {sample{4, 4, nil, nil}, sample{5, 5, nil, nil}}, + }, + }, + { + name: "histogram", + valType: chunkenc.ValHistogram, + chks: [][]chunks.Sample{ + {}, + {sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil}, sample{3, 0, tsdbutil.GenerateTestHistogram(3), nil}}, + {sample{4, 0, tsdbutil.GenerateTestHistogram(4), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(5), nil}}, + }, + }, + { + name: "float histogram", + valType: chunkenc.ValFloatHistogram, + chks: [][]chunks.Sample{ + {}, + {sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)}}, + {sample{4, 0, nil, tsdbutil.GenerateTestFloatHistogram(4)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)}}, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + f, chkMetas := createFakeReaderAndNotPopulatedChunks(tc.chks...) 
+ it := &populateWithDelSeriesIterator{} + it.reset(ulid.ULID{}, f, chkMetas, nil) + require.Equal(t, tc.valType, it.Seek(1)) + require.Equal(t, tc.valType, it.Seek(2)) + require.Equal(t, tc.valType, it.Seek(2)) + checkCurrVal(t, tc.valType, it, 2, 2) + require.Equal(t, int64(0), chkMetas[0].MinTime) + require.Equal(t, int64(1), chkMetas[1].MinTime) + require.Equal(t, int64(4), chkMetas[2].MinTime) + }) + } } // Regression when seeked chunks were still found via binary search and we always // skipped to the end when seeking a value in the current chunk. func TestPopulateWithDelSeriesIterator_SeekInCurrentChunk(t *testing.T) { - t.Run("float", func(t *testing.T) { - valType := chunkenc.ValFloat - f, chkMetas := createFakeReaderAndNotPopulatedChunks( - []chunks.Sample{}, - []chunks.Sample{sample{1, 2, nil, nil}, sample{3, 4, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}}, - []chunks.Sample{}, - ) + cases := []struct { + name string + valType chunkenc.ValueType + chks [][]chunks.Sample + }{ + { + name: "float", + valType: chunkenc.ValFloat, + chks: [][]chunks.Sample{ + {}, + {sample{1, 2, nil, nil}, sample{3, 4, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}}, + {}, + }, + }, + { + name: "histogram", + valType: chunkenc.ValHistogram, + chks: [][]chunks.Sample{ + {}, + {sample{1, 0, tsdbutil.GenerateTestHistogram(2), nil}, sample{3, 0, tsdbutil.GenerateTestHistogram(4), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil}}, + {}, + }, + }, + { + name: "float histogram", + valType: chunkenc.ValFloatHistogram, + chks: [][]chunks.Sample{ + {}, + {sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(4)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}}, + {}, + }, + }, + } - it := &populateWithDelSeriesIterator{} - it.reset(ulid.ULID{}, f, chkMetas, nil) - 
require.Equal(t, valType, it.Next()) - ts, v := it.At() - require.Equal(t, int64(1), ts) - require.Equal(t, float64(2), v) - - require.Equal(t, valType, it.Seek(4)) - ts, v = it.At() - require.Equal(t, int64(5), ts) - require.Equal(t, float64(6), v) - - require.Equal(t, int64(0), chkMetas[0].MinTime) - require.Equal(t, int64(1), chkMetas[1].MinTime) - require.Equal(t, int64(0), chkMetas[2].MinTime) - }) - t.Run("histogram", func(t *testing.T) { - valType := chunkenc.ValHistogram - f, chkMetas := createFakeReaderAndNotPopulatedChunks( - []chunks.Sample{}, - []chunks.Sample{sample{1, 0, tsdbutil.GenerateTestHistogram(2), nil}, sample{3, 0, tsdbutil.GenerateTestHistogram(4), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil}}, - []chunks.Sample{}, - ) - - it := &populateWithDelSeriesIterator{} - it.reset(ulid.ULID{}, f, chkMetas, nil) - require.Equal(t, valType, it.Next()) - ts, h := it.AtHistogram() - require.Equal(t, int64(1), ts) - require.Equal(t, tsdbutil.GenerateTestHistogram(2), h) - - require.Equal(t, valType, it.Seek(4)) - ts, h = it.AtHistogram() - require.Equal(t, int64(5), ts) - h.CounterResetHint = histogram.UnknownCounterReset - require.Equal(t, tsdbutil.GenerateTestHistogram(6), h) - - require.Equal(t, int64(0), chkMetas[0].MinTime) - require.Equal(t, int64(1), chkMetas[1].MinTime) - require.Equal(t, int64(0), chkMetas[2].MinTime) - }) - t.Run("float histogram", func(t *testing.T) { - valType := chunkenc.ValFloatHistogram - f, chkMetas := createFakeReaderAndNotPopulatedChunks( - []chunks.Sample{}, - []chunks.Sample{sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(4)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}}, - []chunks.Sample{}, - ) - - it := &populateWithDelSeriesIterator{} - it.reset(ulid.ULID{}, f, chkMetas, nil) - require.Equal(t, valType, 
it.Next()) - ts, h := it.AtFloatHistogram() - require.Equal(t, int64(1), ts) - require.Equal(t, tsdbutil.GenerateTestFloatHistogram(2), h) - - require.Equal(t, valType, it.Seek(4)) - ts, h = it.AtFloatHistogram() - require.Equal(t, int64(5), ts) - h.CounterResetHint = histogram.UnknownCounterReset - require.Equal(t, tsdbutil.GenerateTestFloatHistogram(6), h) - - require.Equal(t, int64(0), chkMetas[0].MinTime) - require.Equal(t, int64(1), chkMetas[1].MinTime) - require.Equal(t, int64(0), chkMetas[2].MinTime) - }) + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + f, chkMetas := createFakeReaderAndNotPopulatedChunks(tc.chks...) + it := &populateWithDelSeriesIterator{} + it.reset(ulid.ULID{}, f, chkMetas, nil) + require.Equal(t, tc.valType, it.Next()) + checkCurrVal(t, tc.valType, it, 1, 2) + require.Equal(t, tc.valType, it.Seek(4)) + checkCurrVal(t, tc.valType, it, 5, 6) + require.Equal(t, int64(0), chkMetas[0].MinTime) + require.Equal(t, int64(1), chkMetas[1].MinTime) + require.Equal(t, int64(0), chkMetas[2].MinTime) + }) + } } func TestPopulateWithDelSeriesIterator_SeekWithMinTime(t *testing.T) { - t.Run("float", func(t *testing.T) { - valType := chunkenc.ValFloat - f, chkMetas := createFakeReaderAndNotPopulatedChunks( - []chunks.Sample{sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{6, 8, nil, nil}}, - ) + cases := []struct { + name string + valType chunkenc.ValueType + chks [][]chunks.Sample + }{ + { + name: "float", + valType: chunkenc.ValFloat, + chks: [][]chunks.Sample{ + {sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{6, 8, nil, nil}}, + }, + }, + { + name: "histogram", + valType: chunkenc.ValHistogram, + chks: [][]chunks.Sample{ + {sample{1, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{6, 0, tsdbutil.GenerateTestHistogram(8), nil}}, + }, + }, + { + name: "float histogram", + valType: chunkenc.ValFloatHistogram, + chks: [][]chunks.Sample{ + {sample{1, 0, nil, 
tsdbutil.GenerateTestFloatHistogram(6)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}}, + }, + }, + } - it := &populateWithDelSeriesIterator{} - it.reset(ulid.ULID{}, f, chkMetas, nil) - require.Equal(t, chunkenc.ValNone, it.Seek(7)) - require.Equal(t, valType, it.Seek(3)) - require.Equal(t, int64(1), chkMetas[0].MinTime) - }) - t.Run("histogram", func(t *testing.T) { - valType := chunkenc.ValHistogram - f, chkMetas := createFakeReaderAndNotPopulatedChunks( - []chunks.Sample{sample{1, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{6, 0, tsdbutil.GenerateTestHistogram(8), nil}}, - ) - - it := &populateWithDelSeriesIterator{} - it.reset(ulid.ULID{}, f, chkMetas, nil) - require.Equal(t, chunkenc.ValNone, it.Seek(7)) - require.Equal(t, valType, it.Seek(3)) - require.Equal(t, int64(1), chkMetas[0].MinTime) - }) - t.Run("float histogram", func(t *testing.T) { - valType := chunkenc.ValFloatHistogram - f, chkMetas := createFakeReaderAndNotPopulatedChunks( - []chunks.Sample{sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}}, - ) - - it := &populateWithDelSeriesIterator{} - it.reset(ulid.ULID{}, f, chkMetas, nil) - require.Equal(t, chunkenc.ValNone, it.Seek(7)) - require.Equal(t, valType, it.Seek(3)) - require.Equal(t, int64(1), chkMetas[0].MinTime) - }) + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + f, chkMetas := createFakeReaderAndNotPopulatedChunks(tc.chks...) + it := &populateWithDelSeriesIterator{} + it.reset(ulid.ULID{}, f, chkMetas, nil) + require.Equal(t, chunkenc.ValNone, it.Seek(7)) + require.Equal(t, tc.valType, it.Seek(3)) + require.Equal(t, int64(1), chkMetas[0].MinTime) + }) + } } // Regression when calling Next() with a time bounded to fit within two samples. 
// Seek gets called and advances beyond the max time, which was just accepted as a valid sample. func TestPopulateWithDelSeriesIterator_NextWithMinTime(t *testing.T) { - t.Run("float", func(t *testing.T) { - f, chkMetas := createFakeReaderAndNotPopulatedChunks( - []chunks.Sample{sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}}, - ) + cases := []struct { + name string + valType chunkenc.ValueType + chks [][]chunks.Sample + }{ + { + name: "float", + valType: chunkenc.ValFloat, + chks: [][]chunks.Sample{ + {sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}}, + }, + }, + { + name: "histogram", + valType: chunkenc.ValHistogram, + chks: [][]chunks.Sample{ + {sample{1, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil}}, + }, + }, + { + name: "float histogram", + valType: chunkenc.ValFloatHistogram, + chks: [][]chunks.Sample{ + {sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}}, + }, + }, + } - it := &populateWithDelSeriesIterator{} - it.reset(ulid.ULID{}, f, chkMetas, tombstones.Intervals{{Mint: math.MinInt64, Maxt: 2}}.Add(tombstones.Interval{Mint: 4, Maxt: math.MaxInt64})) - require.Equal(t, chunkenc.ValNone, it.Next()) - require.Equal(t, int64(1), chkMetas[0].MinTime) - }) - t.Run("histogram", func(t *testing.T) { - f, chkMetas := createFakeReaderAndNotPopulatedChunks( - []chunks.Sample{sample{1, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil}}, - ) - - it := &populateWithDelSeriesIterator{} - it.reset(ulid.ULID{}, f, chkMetas, tombstones.Intervals{{Mint: math.MinInt64, Maxt: 2}}.Add(tombstones.Interval{Mint: 4, Maxt: math.MaxInt64})) - require.Equal(t, chunkenc.ValNone, it.Next()) - require.Equal(t, 
int64(1), chkMetas[0].MinTime) - }) - t.Run("float histogram", func(t *testing.T) { - f, chkMetas := createFakeReaderAndNotPopulatedChunks( - []chunks.Sample{sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}}, - ) - - it := &populateWithDelSeriesIterator{} - it.reset(ulid.ULID{}, f, chkMetas, tombstones.Intervals{{Mint: math.MinInt64, Maxt: 2}}.Add(tombstones.Interval{Mint: 4, Maxt: math.MaxInt64})) - require.Equal(t, chunkenc.ValNone, it.Next()) - require.Equal(t, int64(1), chkMetas[0].MinTime) - }) + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + f, chkMetas := createFakeReaderAndNotPopulatedChunks(tc.chks...) + it := &populateWithDelSeriesIterator{} + it.reset(ulid.ULID{}, f, chkMetas, tombstones.Intervals{{Mint: math.MinInt64, Maxt: 2}}.Add(tombstones.Interval{Mint: 4, Maxt: math.MaxInt64})) + require.Equal(t, chunkenc.ValNone, it.Next()) + require.Equal(t, int64(1), chkMetas[0].MinTime) + }) + } } // Test the cost of merging series sets for different number of merged sets and their size. 
From 7a4a1127b70d91ee3805238c61957ada1874b13a Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Wed, 1 Nov 2023 19:53:41 +0800 Subject: [PATCH 536/632] Expand TestPopulateWithTombSeriesIterators to test min max times of chunks, including mixed chunks Signed-off-by: Jeanette Tan --- tsdb/querier_test.go | 124 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 122 insertions(+), 2 deletions(-) diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 1c50724d7..5ad5003f1 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -694,12 +694,16 @@ func (r *fakeChunksReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) { } func TestPopulateWithTombSeriesIterators(t *testing.T) { + type minMaxTimes struct { + minTime, maxTime int64 + } cases := []struct { name string chks [][]chunks.Sample - expected []chunks.Sample - expectedChks []chunks.Meta + expected []chunks.Sample + expectedChks []chunks.Meta + expectedMinMaxTimes []minMaxTimes intervals tombstones.Intervals @@ -718,6 +722,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { expectedChks: []chunks.Meta{ assureChunkFromSamples(t, []chunks.Sample{}), }, + expectedMinMaxTimes: []minMaxTimes{{0, 0}}, }, { name: "three empty chunks", // This should never happen. 
@@ -728,6 +733,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { assureChunkFromSamples(t, []chunks.Sample{}), assureChunkFromSamples(t, []chunks.Sample{}), }, + expectedMinMaxTimes: []minMaxTimes{{0, 0}, {0, 0}, {0, 0}}, }, { name: "one chunk", @@ -743,6 +749,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, }), }, + expectedMinMaxTimes: []minMaxTimes{{1, 6}}, }, { name: "two full chunks", @@ -762,6 +769,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { sample{7, 89, nil, nil}, sample{9, 8, nil, nil}, }), }, + expectedMinMaxTimes: []minMaxTimes{{1, 6}, {7, 9}}, }, { name: "three full chunks", @@ -785,6 +793,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { sample{10, 22, nil, nil}, sample{203, 3493, nil, nil}, }), }, + expectedMinMaxTimes: []minMaxTimes{{1, 6}, {7, 9}, {10, 203}}, }, // Seek cases. { @@ -855,6 +864,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { sample{7, 89, nil, nil}, }), }, + expectedMinMaxTimes: []minMaxTimes{{3, 6}, {7, 7}}, }, { name: "two chunks with trimmed middle sample of first chunk", @@ -875,6 +885,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { sample{7, 89, nil, nil}, sample{9, 8, nil, nil}, }), }, + expectedMinMaxTimes: []minMaxTimes{{1, 6}, {7, 9}}, }, { name: "two chunks with deletion across two chunks", @@ -895,6 +906,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { sample{9, 8, nil, nil}, }), }, + expectedMinMaxTimes: []minMaxTimes{{1, 3}, {9, 9}}, }, // Deletion with seek. 
{ @@ -935,6 +947,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil}, }), }, + expectedMinMaxTimes: []minMaxTimes{{1, 6}}, }, { name: "one histogram chunk intersect with deletion interval", @@ -959,6 +972,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil}, }), }, + expectedMinMaxTimes: []minMaxTimes{{1, 3}}, }, { name: "one float histogram chunk", @@ -984,6 +998,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))}, }), }, + expectedMinMaxTimes: []minMaxTimes{{1, 6}}, }, { name: "one float histogram chunk intersect with deletion interval", @@ -1008,6 +1023,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))}, }), }, + expectedMinMaxTimes: []minMaxTimes{{1, 3}}, }, { name: "one gauge histogram chunk", @@ -1033,6 +1049,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil}, }), }, + expectedMinMaxTimes: []minMaxTimes{{1, 6}}, }, { name: "one gauge histogram chunk intersect with deletion interval", @@ -1057,6 +1074,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil}, }), }, + expectedMinMaxTimes: []minMaxTimes{{1, 3}}, }, { name: "one gauge float histogram", @@ -1082,6 +1100,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)}, }), }, + expectedMinMaxTimes: []minMaxTimes{{1, 6}}, }, { name: "one gauge float histogram chunk intersect with deletion interval", @@ -1106,6 +1125,102 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { sample{3, 0, 
nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)}, }), }, + expectedMinMaxTimes: []minMaxTimes{{1, 3}}, + }, + { + name: "three full mixed chunks", + chks: [][]chunks.Sample{ + {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, + { + sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, + sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, + }, + { + sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, + sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)}, + }, + }, + + expected: []chunks.Sample{ + sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)}, + }, + expectedChks: []chunks.Meta{ + assureChunkFromSamples(t, []chunks.Sample{ + sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, + sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, + sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)}, + }), + }, + expectedMinMaxTimes: []minMaxTimes{{1, 6}, {7, 9}, {10, 203}}, + }, + { + name: "three full mixed chunks in different order", + chks: [][]chunks.Sample{ + { + sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, + sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, + }, + {sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}}, + { + sample{100, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, + sample{203, 0, nil, 
tsdbutil.GenerateTestGaugeFloatHistogram(3493)}, + }, + }, + + expected: []chunks.Sample{ + sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}, sample{100, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)}, + }, + expectedChks: []chunks.Meta{ + assureChunkFromSamples(t, []chunks.Sample{ + sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, + sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{100, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, + sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)}, + }), + }, + expectedMinMaxTimes: []minMaxTimes{{7, 9}, {11, 16}, {100, 203}}, + }, + { + name: "three full mixed chunks in different order intersect with deletion interval", + chks: [][]chunks.Sample{ + { + sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, + sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, + }, + {sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}}, + { + sample{100, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, + sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)}, + }, + }, + intervals: tombstones.Intervals{{Mint: 8, Maxt: 11}, {Mint: 15, Maxt: 150}}, + + expected: []chunks.Sample{ + sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)}, + }, + expectedChks: []chunks.Meta{ + assureChunkFromSamples(t, []chunks.Sample{ + sample{7, 0, 
tsdbutil.GenerateTestGaugeHistogram(89), nil}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)}, + }), + }, + expectedMinMaxTimes: []minMaxTimes{{7, 7}, {12, 13}, {203, 203}}, }, } for _, tc := range cases { @@ -1147,6 +1262,11 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { rmChunkRefs(expandedResult) rmChunkRefs(tc.expectedChks) require.Equal(t, tc.expectedChks, expandedResult) + + for i, meta := range expandedResult { + require.Equal(t, tc.expectedMinMaxTimes[i].minTime, meta.MinTime) + require.Equal(t, tc.expectedMinMaxTimes[i].maxTime, meta.MaxTime) + } }) }) } From 2f7060bd5afa40d50e7251cacd08a69bb8c4684f Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Wed, 1 Nov 2023 20:04:23 +0800 Subject: [PATCH 537/632] Expand TestPopulateWithTombSeriesIterators to test earlier deletion intervals for histogram chunks as well as time-overlapping chunks Signed-off-by: Jeanette Tan --- tsdb/querier_test.go | 132 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 128 insertions(+), 4 deletions(-) diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 5ad5003f1..720d5c699 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -950,7 +950,30 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { expectedMinMaxTimes: []minMaxTimes{{1, 6}}, }, { - name: "one histogram chunk intersect with deletion interval", + name: "one histogram chunk intersect with earlier deletion interval", + chks: [][]chunks.Sample{ + { + sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, + sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil}, + sample{3, 0, tsdbutil.GenerateTestHistogram(3), nil}, + sample{6, 0, tsdbutil.GenerateTestHistogram(6), nil}, + }, + }, + intervals: tombstones.Intervals{{Mint: 1, Maxt: 2}}, + expected: []chunks.Sample{ + sample{3, 0, 
tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil}, + sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil}, + }, + expectedChks: []chunks.Meta{ + assureChunkFromSamples(t, []chunks.Sample{ + sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil}, + sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil}, + }), + }, + expectedMinMaxTimes: []minMaxTimes{{3, 6}}, + }, + { + name: "one histogram chunk intersect with later deletion interval", chks: [][]chunks.Sample{ { sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, @@ -1001,7 +1024,30 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { expectedMinMaxTimes: []minMaxTimes{{1, 6}}, }, { - name: "one float histogram chunk intersect with deletion interval", + name: "one float histogram chunk intersect with earlier deletion interval", + chks: [][]chunks.Sample{ + { + sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, + sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, + sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)}, + sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, + }, + }, + intervals: tombstones.Intervals{{Mint: 1, Maxt: 2}}, + expected: []chunks.Sample{ + sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))}, + sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))}, + }, + expectedChks: []chunks.Meta{ + assureChunkFromSamples(t, []chunks.Sample{ + sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))}, + sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))}, + }), + }, + expectedMinMaxTimes: []minMaxTimes{{3, 6}}, + }, + { + name: "one float histogram chunk intersect with later deletion interval", chks: [][]chunks.Sample{ { sample{1, 0, nil, 
tsdbutil.GenerateTestFloatHistogram(1)}, @@ -1052,7 +1098,30 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { expectedMinMaxTimes: []minMaxTimes{{1, 6}}, }, { - name: "one gauge histogram chunk intersect with deletion interval", + name: "one gauge histogram chunk intersect with earlier deletion interval", + chks: [][]chunks.Sample{ + { + sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil}, + sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil}, + sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil}, + sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil}, + }, + }, + intervals: tombstones.Intervals{{Mint: 1, Maxt: 2}}, + expected: []chunks.Sample{ + sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil}, + sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil}, + }, + expectedChks: []chunks.Meta{ + assureChunkFromSamples(t, []chunks.Sample{ + sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil}, + sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil}, + }), + }, + expectedMinMaxTimes: []minMaxTimes{{3, 6}}, + }, + { + name: "one gauge histogram chunk intersect with later deletion interval", chks: [][]chunks.Sample{ { sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil}, @@ -1103,7 +1172,30 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { expectedMinMaxTimes: []minMaxTimes{{1, 6}}, }, { - name: "one gauge float histogram chunk intersect with deletion interval", + name: "one gauge float histogram chunk intersect with earlier deletion interval", + chks: [][]chunks.Sample{ + { + sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)}, + sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)}, + sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)}, + sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)}, + }, + }, + intervals: tombstones.Intervals{{Mint: 1, Maxt: 2}}, + expected: []chunks.Sample{ + sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)}, + sample{6, 0, 
nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)}, + }, + expectedChks: []chunks.Meta{ + assureChunkFromSamples(t, []chunks.Sample{ + sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)}, + sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)}, + }), + }, + expectedMinMaxTimes: []minMaxTimes{{3, 6}}, + }, + { + name: "one gauge float histogram chunk intersect with later deletion interval", chks: [][]chunks.Sample{ { sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)}, @@ -1222,6 +1314,38 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, expectedMinMaxTimes: []minMaxTimes{{7, 7}, {12, 13}, {203, 203}}, }, + { + name: "three full mixed chunks overlapping", + chks: [][]chunks.Sample{ + { + sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, + sample{12, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, + }, + {sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}}, + { + sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, + sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)}, + }, + }, + + expected: []chunks.Sample{ + sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{12, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}, sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)}, + }, + expectedChks: []chunks.Meta{ + assureChunkFromSamples(t, []chunks.Sample{ + sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, + sample{12, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, + 
sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)}, + }), + }, + expectedMinMaxTimes: []minMaxTimes{{7, 12}, {11, 16}, {10, 203}}, + }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { From 27abf09e7fe80237d4925874de4d2e524460e71a Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Wed, 1 Nov 2023 19:53:41 +0800 Subject: [PATCH 538/632] Fix missing MinTime in histogram chunks Signed-off-by: Jeanette Tan --- tsdb/querier.go | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/tsdb/querier.go b/tsdb/querier.go index 96406f1bd..9a01e8c49 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -855,13 +855,18 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool { if app, err = newChunk.Appender(); err != nil { break } - - for vt := valueType; vt != chunkenc.ValNone; vt = p.currDelIter.Next() { + var h *histogram.Histogram + t, h = p.currDelIter.AtHistogram() + p.curr.MinTime = t + _, _, app, err = app.AppendHistogram(nil, t, h, true) + if err != nil { + break + } + for vt := p.currDelIter.Next(); vt != chunkenc.ValNone; vt = p.currDelIter.Next() { if vt != chunkenc.ValHistogram { err = fmt.Errorf("found value type %v in histogram chunk", vt) break } - var h *histogram.Histogram t, h = p.currDelIter.AtHistogram() _, _, app, err = app.AppendHistogram(nil, t, h, true) if err != nil { @@ -890,13 +895,18 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool { if app, err = newChunk.Appender(); err != nil { break } - - for vt := valueType; vt != chunkenc.ValNone; vt = p.currDelIter.Next() { + var h *histogram.FloatHistogram + t, h = p.currDelIter.AtFloatHistogram() + p.curr.MinTime = t + _, _, app, err = app.AppendFloatHistogram(nil, t, h, true) + if err != nil { + break + } + for vt := p.currDelIter.Next(); vt != chunkenc.ValNone; vt = p.currDelIter.Next() { if vt != chunkenc.ValFloatHistogram { err = fmt.Errorf("found value type %v in histogram chunk", vt) break } - var h 
*histogram.FloatHistogram t, h = p.currDelIter.AtFloatHistogram() _, _, app, err = app.AppendFloatHistogram(nil, t, h, true) if err != nil { From 4296ecbd14dd2639eb5768ab66e4c61ea86c7b33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Wed, 1 Nov 2023 15:52:04 +0100 Subject: [PATCH 539/632] tsdb/compact_test.go: test mixed typed series with PopulateBlock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add testcase and update test so that it can test native histograms as well. Signed-off-by: György Krajcsovits --- tsdb/compact_test.go | 43 ++++++++++++++++++++++++++++++++++++++++--- tsdb/querier_test.go | 33 ++++++++++++++++++++++++++++----- 2 files changed, 68 insertions(+), 8 deletions(-) diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index 098be8bfa..2d98b3050 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -38,6 +38,7 @@ import ( "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/tombstones" + "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/tsdb/wlog" ) @@ -933,6 +934,31 @@ func TestCompaction_populateBlock(t *testing.T) { }, }, }, + { + title: "Populate from mixed type series and expect sample inside the interval only", // regression test for populateWithDelChunkSeriesIterator failing to set minTime on chunks + compactMinTime: 1, + compactMaxTime: 11, + inputSeriesSamples: [][]seriesSamples{ + { + { + lset: map[string]string{"a": "1"}, + chunks: [][]sample{ + {{t: 0, h: tsdbutil.GenerateTestHistogram(0)}, {t: 1, h: tsdbutil.GenerateTestHistogram(1)}}, + {{t: 10, f: 1}, {t: 11, f: 2}}, + }, + }, + }, + }, + expSeriesSamples: []seriesSamples{ + { + lset: map[string]string{"a": "1"}, + chunks: [][]sample{ + {{t: 1, h: tsdbutil.GenerateTestHistogram(1)}}, + {{t: 10, f: 1}}, + }, + }, + }, + }, } { t.Run(tc.title, func(t *testing.T) { blocks := 
make([]BlockReader, 0, len(tc.inputSeriesSamples)) @@ -974,12 +1000,23 @@ func TestCompaction_populateBlock(t *testing.T) { firstTs int64 = math.MaxInt64 s sample ) - for iter.Next() == chunkenc.ValFloat { - s.t, s.f = iter.At() + for vt := iter.Next(); vt != chunkenc.ValNone; vt = iter.Next() { + switch vt { + case chunkenc.ValFloat: + s.t, s.f = iter.At() + samples = append(samples, s) + case chunkenc.ValHistogram: + s.t, s.h = iter.AtHistogram() + samples = append(samples, s) + case chunkenc.ValFloatHistogram: + s.t, s.fh = iter.AtFloatHistogram() + samples = append(samples, s) + default: + require.Fail(t, "unexpected value type") + } if firstTs == math.MaxInt64 { firstTs = s.t } - samples = append(samples, s) } // Check if chunk has correct min, max times. diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 720d5c699..17a99c6e3 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -133,12 +133,35 @@ func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkRe Ref: chunkRef, }) - chunk := chunkenc.NewXORChunk() - app, _ := chunk.Appender() - for _, smpl := range chk { - app.Append(smpl.t, smpl.f) + switch { + case chk[0].fh != nil: + chunk := chunkenc.NewFloatHistogramChunk() + app, _ := chunk.Appender() + for _, smpl := range chk { + require.NotNil(t, smpl.fh, "chunk can only contain one type of sample") + _, _, _, err := app.AppendFloatHistogram(nil, smpl.t, smpl.fh, true) + require.NoError(t, err, "chunk should be appendable") + } + chkReader[chunkRef] = chunk + case chk[0].h != nil: + chunk := chunkenc.NewHistogramChunk() + app, _ := chunk.Appender() + for _, smpl := range chk { + require.NotNil(t, smpl.h, "chunk can only contain one type of sample") + _, _, _, err := app.AppendHistogram(nil, smpl.t, smpl.h, true) + require.NoError(t, err, "chunk should be appendable") + } + chkReader[chunkRef] = chunk + default: + chunk := chunkenc.NewXORChunk() + app, _ := chunk.Appender() + for _, smpl := range chk { + 
require.Nil(t, smpl.h, "chunk can only contain one type of sample") + require.Nil(t, smpl.fh, "chunk can only contain one type of sample") + app.Append(smpl.t, smpl.f) + } + chkReader[chunkRef] = chunk } - chkReader[chunkRef] = chunk chunkRef++ } ls := labels.FromMap(s.lset) From 3ccaaa40ba088b74e69a471e95fce9958bd291a5 Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Thu, 2 Nov 2023 13:37:07 +0800 Subject: [PATCH 540/632] Fix according to code review Signed-off-by: Jeanette Tan --- tsdb/compact_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index 2d98b3050..cf8c2439a 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -935,7 +935,8 @@ func TestCompaction_populateBlock(t *testing.T) { }, }, { - title: "Populate from mixed type series and expect sample inside the interval only", // regression test for populateWithDelChunkSeriesIterator failing to set minTime on chunks + // Regression test for populateWithDelChunkSeriesIterator failing to set minTime on chunks. + title: "Populate from mixed type series and expect sample inside the interval only.", compactMinTime: 1, compactMaxTime: 11, inputSeriesSamples: [][]seriesSamples{ From 9e3df532d8294d4fe3284bde7bc96db336a33552 Mon Sep 17 00:00:00 2001 From: Dimitar Dimitrov Date: Thu, 2 Nov 2023 13:17:35 +0100 Subject: [PATCH 541/632] Export `promql.Engine.FindMinMaxTime` This function is useful to analyze promQL queries. We want to use this in Mimir to record the time range which the query touches. I also chose to remove the `Engine` receiver because it was unnecessary, and it makes it easier to use, but happy to refactor that if you disagree. The function is untested on its own. If you prefer to have unit tests now that its exported, I can look into adding some. 
Signed-off-by: Dimitar Dimitrov --- promql/engine.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index ed820b90d..37057d209 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -674,7 +674,7 @@ func durationMilliseconds(d time.Duration) int64 { // execEvalStmt evaluates the expression of an evaluation statement for the given time range. func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.EvalStmt) (parser.Value, annotations.Annotations, error) { prepareSpanTimer, ctxPrepare := query.stats.GetSpanTimer(ctx, stats.QueryPreparationTime, ng.metrics.queryPrepareTime) - mint, maxt := ng.findMinMaxTime(s) + mint, maxt := FindMinMaxTime(s) querier, err := query.queryable.Querier(mint, maxt) if err != nil { prepareSpanTimer.Finish() @@ -816,7 +816,10 @@ func subqueryTimes(path []parser.Node) (time.Duration, time.Duration, *int64) { return subqOffset, subqRange, tsp } -func (ng *Engine) findMinMaxTime(s *parser.EvalStmt) (int64, int64) { +// FindMinMaxTime returns the time in milliseconds of the earliest and latest point in time the statement will try to process. +// This takes into account offsets, @ modifiers, and range selectors. +// If the statement does not select series, then FindMinMaxTime returns (0, 0). +func FindMinMaxTime(s *parser.EvalStmt) (int64, int64) { var minTimestamp, maxTimestamp int64 = math.MaxInt64, math.MinInt64 // Whenever a MatrixSelector is evaluated, evalRange is set to the corresponding range. 
// The evaluation of the VectorSelector inside then evaluates the given range and unsets @@ -825,7 +828,7 @@ func (ng *Engine) findMinMaxTime(s *parser.EvalStmt) (int64, int64) { parser.Inspect(s.Expr, func(node parser.Node, path []parser.Node) error { switch n := node.(type) { case *parser.VectorSelector: - start, end := ng.getTimeRangesForSelector(s, n, path, evalRange) + start, end := getTimeRangesForSelector(s, n, path, evalRange) if start < minTimestamp { minTimestamp = start } @@ -848,7 +851,7 @@ func (ng *Engine) findMinMaxTime(s *parser.EvalStmt) (int64, int64) { return minTimestamp, maxTimestamp } -func (ng *Engine) getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorSelector, path []parser.Node, evalRange time.Duration) (int64, int64) { +func getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorSelector, path []parser.Node, evalRange time.Duration) (int64, int64) { start, end := timestamp.FromTime(s.Start), timestamp.FromTime(s.End) subqOffset, subqRange, subqTs := subqueryTimes(path) @@ -905,7 +908,7 @@ func (ng *Engine) populateSeries(ctx context.Context, querier storage.Querier, s parser.Inspect(s.Expr, func(node parser.Node, path []parser.Node) error { switch n := node.(type) { case *parser.VectorSelector: - start, end := ng.getTimeRangesForSelector(s, n, path, evalRange) + start, end := getTimeRangesForSelector(s, n, path, evalRange) interval := ng.getLastSubqueryInterval(path) if interval == 0 { interval = s.Interval From 52eb303031450f06e4d2da6c3f08681c0a5b8bb3 Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Thu, 2 Nov 2023 21:23:05 +0800 Subject: [PATCH 542/632] Refactor assigning MinTime in histogram chunks Signed-off-by: Jeanette Tan --- tsdb/querier.go | 28 +++++++--------------------- 1 file changed, 7 insertions(+), 21 deletions(-) diff --git a/tsdb/querier.go b/tsdb/querier.go index 9a01e8c49..a832c0d1b 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -840,6 +840,7 @@ func (p *populateWithDelChunkSeriesIterator) 
Next() bool { } return false } + p.curr.MinTime = p.currDelIter.AtT() // Re-encode the chunk if iterator is provider. This means that it has // some samples to be deleted or chunk is opened. @@ -855,18 +856,12 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool { if app, err = newChunk.Appender(); err != nil { break } - var h *histogram.Histogram - t, h = p.currDelIter.AtHistogram() - p.curr.MinTime = t - _, _, app, err = app.AppendHistogram(nil, t, h, true) - if err != nil { - break - } - for vt := p.currDelIter.Next(); vt != chunkenc.ValNone; vt = p.currDelIter.Next() { + for vt := valueType; vt != chunkenc.ValNone; vt = p.currDelIter.Next() { if vt != chunkenc.ValHistogram { err = fmt.Errorf("found value type %v in histogram chunk", vt) break } + var h *histogram.Histogram t, h = p.currDelIter.AtHistogram() _, _, app, err = app.AppendHistogram(nil, t, h, true) if err != nil { @@ -878,15 +873,12 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool { if app, err = newChunk.Appender(); err != nil { break } - var v float64 - t, v = p.currDelIter.At() - p.curr.MinTime = t - app.Append(t, v) - for vt := p.currDelIter.Next(); vt != chunkenc.ValNone; vt = p.currDelIter.Next() { + for vt := valueType; vt != chunkenc.ValNone; vt = p.currDelIter.Next() { if vt != chunkenc.ValFloat { err = fmt.Errorf("found value type %v in float chunk", vt) break } + var v float64 t, v = p.currDelIter.At() app.Append(t, v) } @@ -895,18 +887,12 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool { if app, err = newChunk.Appender(); err != nil { break } - var h *histogram.FloatHistogram - t, h = p.currDelIter.AtFloatHistogram() - p.curr.MinTime = t - _, _, app, err = app.AppendFloatHistogram(nil, t, h, true) - if err != nil { - break - } - for vt := p.currDelIter.Next(); vt != chunkenc.ValNone; vt = p.currDelIter.Next() { + for vt := valueType; vt != chunkenc.ValNone; vt = p.currDelIter.Next() { if vt != chunkenc.ValFloatHistogram { err = fmt.Errorf("found value type 
%v in histogram chunk", vt) break } + var h *histogram.FloatHistogram t, h = p.currDelIter.AtFloatHistogram() _, _, app, err = app.AppendFloatHistogram(nil, t, h, true) if err != nil { From ea27db7389fa1bfad1b0e369cfa2053173414bb9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 2 Nov 2023 18:04:56 +0000 Subject: [PATCH 543/632] build(deps): bump github.com/linode/linodego from 1.23.0 to 1.24.0 Bumps [github.com/linode/linodego](https://github.com/linode/linodego) from 1.23.0 to 1.24.0. - [Release notes](https://github.com/linode/linodego/releases) - [Commits](https://github.com/linode/linodego/compare/v1.23.0...v1.24.0) --- updated-dependencies: - dependency-name: github.com/linode/linodego dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 19 ++++++++++++++----- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index 7d9ed9c1f..c1feeff22 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.1 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b - github.com/linode/linodego v1.23.0 + github.com/linode/linodego v1.24.0 github.com/miekg/dns v1.1.56 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f @@ -135,7 +135,7 @@ require ( github.com/go-openapi/spec v0.20.9 // indirect github.com/go-openapi/swag v0.22.4 // indirect github.com/go-openapi/validate v0.22.1 // indirect - github.com/go-resty/resty/v2 v2.7.0 // indirect + github.com/go-resty/resty/v2 v2.10.0 // indirect github.com/golang/glog v1.1.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect diff --git a/go.sum b/go.sum index 265ed3572..c6293e2b3 100644 --- a/go.sum +++ b/go.sum @@ -236,8 
+236,8 @@ github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogB github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= -github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= -github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= +github.com/go-resty/resty/v2 v2.10.0 h1:Qla4W/+TMmv0fOeeRqzEpXPLfTUnR5HZ1+lGs+CkiCo= +github.com/go-resty/resty/v2 v2.10.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= @@ -498,8 +498,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.23.0 h1:s0ReCZtuN9Z1IoUN9w1RLeYO1dMZUGPwOQ/IBFsBHtU= -github.com/linode/linodego v1.23.0/go.mod h1:0U7wj/UQOqBNbKv1FYTXiBUXueR8DY4HvIotwE0ENgg= +github.com/linode/linodego v1.24.0 h1:zO+bMdTE6wPccqP7QIkbxAfACX7DjSX6DW9JE/qOKDQ= +github.com/linode/linodego v1.24.0/go.mod h1:cq/ty5BCEQnsO6OjMqD7Q03KCCyB8CNM5E3MNg0LV6M= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -846,6 +846,7 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -888,10 +889,11 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod 
h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -915,6 +917,7 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -982,12 +985,16 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1001,6 +1008,7 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1062,6 +1070,7 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 9d9c5983215ab183bf4ddcfe0ab7f8b2c3710c3f Mon Sep 17 00:00:00 2001 From: 
"dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 2 Nov 2023 20:09:48 +0000 Subject: [PATCH 544/632] build(deps): bump github.com/klauspost/compress from 1.17.1 to 1.17.2 Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.17.1 to 1.17.2. - [Release notes](https://github.com/klauspost/compress/releases) - [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml) - [Commits](https://github.com/klauspost/compress/compare/v1.17.1...v1.17.2) --- updated-dependencies: - dependency-name: github.com/klauspost/compress dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c1feeff22..ccadd3cae 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/hetznercloud/hcloud-go/v2 v2.4.0 github.com/ionos-cloud/sdk-go/v6 v6.1.9 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.17.1 + github.com/klauspost/compress v1.17.2 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b github.com/linode/linodego v1.24.0 github.com/miekg/dns v1.1.56 diff --git a/go.sum b/go.sum index c6293e2b3..3e49020b5 100644 --- a/go.sum +++ b/go.sum @@ -479,8 +479,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.1 h1:NE3C767s2ak2bweCZo3+rdP4U/HoyVXLv/X9f2gPS5g= -github.com/klauspost/compress v1.17.1/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= +github.com/klauspost/compress 
v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= From fe057fc60d09fa49f70898e4c954566b570ecc5d Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Thu, 2 Nov 2023 21:45:07 +0100 Subject: [PATCH 545/632] use Go standard errors package Signed-off-by: Matthieu MOREL --- .golangci.yml | 11 +++++++++-- discovery/ionos/ionos.go | 2 +- model/textparse/protobufparse.go | 16 ++++++++-------- scrape/manager.go | 2 +- scrape/target.go | 18 +++++++++--------- storage/fanout_test.go | 2 +- util/annotations/annotations.go | 4 ++-- 7 files changed, 31 insertions(+), 24 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 871d748c7..2d53106c0 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -37,12 +37,17 @@ issues: - path: tsdb/ linters: - errorlint - - path: util/ + - path: tsdb/ + text: "import 'github.com/pkg/errors' is not allowed" linters: - - errorlint + - depguard - path: web/ linters: - errorlint + - path: web/ + text: "import 'github.com/pkg/errors' is not allowed" + linters: + - depguard - linters: - godot source: "^// ===" @@ -62,6 +67,8 @@ linters-settings: desc: "Use corresponding 'os' or 'io' functions instead." - pkg: "regexp" desc: "Use github.com/grafana/regexp instead of regexp" + - pkg: "github.com/pkg/errors" + desc: "Use 'errors' or 'fmt' instead of github.com/pkg/errors" errcheck: exclude-functions: # Don't flag lines such as "io.Copy(io.Discard, resp.Body)". 
diff --git a/discovery/ionos/ionos.go b/discovery/ionos/ionos.go index a13a00058..3afed8d79 100644 --- a/discovery/ionos/ionos.go +++ b/discovery/ionos/ionos.go @@ -14,10 +14,10 @@ package ionos import ( + "errors" "time" "github.com/go-kit/log" - "github.com/pkg/errors" "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go index d6d87ee36..9a6dd6f6d 100644 --- a/model/textparse/protobufparse.go +++ b/model/textparse/protobufparse.go @@ -16,6 +16,7 @@ package textparse import ( "bytes" "encoding/binary" + "errors" "fmt" "io" "math" @@ -24,7 +25,6 @@ import ( "github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/types" - "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" @@ -396,10 +396,10 @@ func (p *ProtobufParser) Next() (Entry, error) { // into metricBytes and validate only name, help, and type for now. name := p.mf.GetName() if !model.IsValidMetricName(model.LabelValue(name)) { - return EntryInvalid, errors.Errorf("invalid metric name: %s", name) + return EntryInvalid, fmt.Errorf("invalid metric name: %s", name) } if help := p.mf.GetHelp(); !utf8.ValidString(help) { - return EntryInvalid, errors.Errorf("invalid help for metric %q: %s", name, help) + return EntryInvalid, fmt.Errorf("invalid help for metric %q: %s", name, help) } switch p.mf.GetType() { case dto.MetricType_COUNTER, @@ -410,7 +410,7 @@ func (p *ProtobufParser) Next() (Entry, error) { dto.MetricType_UNTYPED: // All good. 
default: - return EntryInvalid, errors.Errorf("unknown metric type for metric %q: %s", name, p.mf.GetType()) + return EntryInvalid, fmt.Errorf("unknown metric type for metric %q: %s", name, p.mf.GetType()) } p.metricBytes.Reset() p.metricBytes.WriteString(name) @@ -463,7 +463,7 @@ func (p *ProtobufParser) Next() (Entry, error) { return EntryInvalid, err } default: - return EntryInvalid, errors.Errorf("invalid protobuf parsing state: %d", p.state) + return EntryInvalid, fmt.Errorf("invalid protobuf parsing state: %d", p.state) } return p.state, nil } @@ -476,13 +476,13 @@ func (p *ProtobufParser) updateMetricBytes() error { b.WriteByte(model.SeparatorByte) n := lp.GetName() if !model.LabelName(n).IsValid() { - return errors.Errorf("invalid label name: %s", n) + return fmt.Errorf("invalid label name: %s", n) } b.WriteString(n) b.WriteByte(model.SeparatorByte) v := lp.GetValue() if !utf8.ValidString(v) { - return errors.Errorf("invalid label value: %s", v) + return fmt.Errorf("invalid label value: %s", v) } b.WriteString(v) } @@ -557,7 +557,7 @@ func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) { } totalLength := varIntLength + int(messageLength) if totalLength > len(b) { - return 0, errors.Errorf("protobufparse: insufficient length of buffer, expected at least %d bytes, got %d bytes", totalLength, len(b)) + return 0, fmt.Errorf("protobufparse: insufficient length of buffer, expected at least %d bytes, got %d bytes", totalLength, len(b)) } mf.Reset() return totalLength, mf.Unmarshal(b[varIntLength:totalLength]) diff --git a/scrape/manager.go b/scrape/manager.go index 69bd4bc42..a0ac38f6b 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -14,6 +14,7 @@ package scrape import ( + "errors" "fmt" "hash/fnv" "reflect" @@ -22,7 +23,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" 
"github.com/prometheus/common/model" diff --git a/scrape/target.go b/scrape/target.go index ad39b6bb2..8cc8597a4 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -14,6 +14,7 @@ package scrape import ( + "errors" "fmt" "hash/fnv" "net" @@ -22,7 +23,6 @@ import ( "sync" "time" - "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/config" @@ -289,12 +289,12 @@ func (t *Target) intervalAndTimeout(defaultInterval, defaultDuration time.Durati intervalLabel := t.labels.Get(model.ScrapeIntervalLabel) interval, err := model.ParseDuration(intervalLabel) if err != nil { - return defaultInterval, defaultDuration, errors.Errorf("Error parsing interval label %q: %v", intervalLabel, err) + return defaultInterval, defaultDuration, fmt.Errorf("Error parsing interval label %q: %w", intervalLabel, err) } timeoutLabel := t.labels.Get(model.ScrapeTimeoutLabel) timeout, err := model.ParseDuration(timeoutLabel) if err != nil { - return defaultInterval, defaultDuration, errors.Errorf("Error parsing timeout label %q: %v", timeoutLabel, err) + return defaultInterval, defaultDuration, fmt.Errorf("Error parsing timeout label %q: %w", timeoutLabel, err) } return time.Duration(interval), time.Duration(timeout), nil @@ -444,7 +444,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort case "https": addr += ":443" default: - return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("invalid scheme: %q", cfg.Scheme) + return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("invalid scheme: %q", cfg.Scheme) } lb.Set(model.AddressLabel, addr) } @@ -471,7 +471,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort interval := lb.Get(model.ScrapeIntervalLabel) intervalDuration, err := model.ParseDuration(interval) if err != nil { - return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("error parsing scrape interval: %v", err) + return labels.EmptyLabels(), 
labels.EmptyLabels(), fmt.Errorf("error parsing scrape interval: %w", err) } if time.Duration(intervalDuration) == 0 { return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("scrape interval cannot be 0") @@ -480,14 +480,14 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort timeout := lb.Get(model.ScrapeTimeoutLabel) timeoutDuration, err := model.ParseDuration(timeout) if err != nil { - return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("error parsing scrape timeout: %v", err) + return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("error parsing scrape timeout: %w", err) } if time.Duration(timeoutDuration) == 0 { return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("scrape timeout cannot be 0") } if timeoutDuration > intervalDuration { - return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("scrape timeout cannot be greater than scrape interval (%q > %q)", timeout, interval) + return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("scrape timeout cannot be greater than scrape interval (%q > %q)", timeout, interval) } // Meta labels are deleted after relabelling. Other internal labels propagate to @@ -507,7 +507,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort err = res.Validate(func(l labels.Label) error { // Check label values are valid, drop the target if not. 
if !model.LabelValue(l.Value).IsValid() { - return errors.Errorf("invalid label value for %q: %q", l.Name, l.Value) + return fmt.Errorf("invalid label value for %q: %q", l.Name, l.Value) } return nil }) @@ -536,7 +536,7 @@ func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefault lset, origLabels, err := PopulateLabels(lb, cfg, noDefaultPort) if err != nil { - failures = append(failures, errors.Wrapf(err, "instance %d in group %s", i, tg)) + failures = append(failures, fmt.Errorf("instance %d in group %s: %w", i, tg, err)) } if !lset.IsEmpty() || !origLabels.IsEmpty() { targets = append(targets, NewTarget(lset, origLabels, cfg.Params)) diff --git a/storage/fanout_test.go b/storage/fanout_test.go index 0f9363d7a..a99c2f803 100644 --- a/storage/fanout_test.go +++ b/storage/fanout_test.go @@ -15,9 +15,9 @@ package storage_test import ( "context" + "errors" "testing" - "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" diff --git a/util/annotations/annotations.go b/util/annotations/annotations.go index 9d0b11a08..fa4983fc9 100644 --- a/util/annotations/annotations.go +++ b/util/annotations/annotations.go @@ -81,8 +81,8 @@ func (a Annotations) AsStrings(query string, maxAnnos int) []string { if maxAnnos > 0 && len(arr) >= maxAnnos { break } - anErr, ok := err.(annoErr) - if ok { + var anErr annoErr + if errors.As(err, &anErr) { anErr.Query = query err = anErr } From 1cd6c1cde5a2e44dc5869016b294c7dccba269dc Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Fri, 13 Oct 2023 10:58:26 +0300 Subject: [PATCH 546/632] ValidateHistogram: strict Count check in absence of NaNs Signed-off-by: Linas Medziunas --- promql/engine_test.go | 2 +- storage/interface.go | 1 + tsdb/db_test.go | 5 +++-- tsdb/head_append.go | 20 +++++++++++++++----- tsdb/head_test.go | 36 +++++++++++++++++++++--------------- 5 files changed, 41 insertions(+), 23 deletions(-) diff --git a/promql/engine_test.go b/promql/engine_test.go 
index baca992b8..7532a3294 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -3399,7 +3399,7 @@ func TestNativeHistogram_HistogramStdDevVar(t *testing.T) { { name: "-50, -8, 0, 3, 8, 9, 100, +Inf", h: &histogram.Histogram{ - Count: 8, + Count: 7, ZeroCount: 1, Sum: math.Inf(1), Schema: 3, diff --git a/storage/interface.go b/storage/interface.go index 211bcbc41..4da152aa4 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -44,6 +44,7 @@ var ( ErrExemplarsDisabled = fmt.Errorf("exemplar storage is disabled or max exemplars is less than or equal to 0") ErrNativeHistogramsDisabled = fmt.Errorf("native histograms are disabled") ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets") + ErrHistogramCountMismatch = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)") ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative") ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative") ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided") diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 70d1844d4..f021faba9 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -508,7 +508,7 @@ func TestAmendHistogramDatapointCausesError(t *testing.T) { h := histogram.Histogram{ Schema: 3, - Count: 61, + Count: 52, Sum: 2.7, ZeroThreshold: 0.1, ZeroCount: 42, @@ -6314,6 +6314,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { t.Run("buckets disappearing", func(t *testing.T) { h.PositiveSpans[1].Length-- h.PositiveBuckets = h.PositiveBuckets[:len(h.PositiveBuckets)-1] + h.Count -= 3 appendHistogram(series1, 110, h, &exp1, histogram.CounterReset) testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) }) @@ -6533,7 
+6534,7 @@ func TestNativeHistogramFlag(t *testing.T) { require.NoError(t, db.Close()) }) h := &histogram.Histogram{ - Count: 10, + Count: 9, ZeroCount: 4, ZeroThreshold: 0.001, Sum: 35.5, diff --git a/tsdb/head_append.go b/tsdb/head_append.go index d1f4d3035..330caad78 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -659,11 +659,21 @@ func ValidateHistogram(h *histogram.Histogram) error { return errors.Wrap(err, "positive side") } - if c := nCount + pCount + h.ZeroCount; c > h.Count { - return errors.Wrap( - storage.ErrHistogramCountNotBigEnough, - fmt.Sprintf("%d observations found in buckets, but the Count field is %d", c, h.Count), - ) + sumOfBuckets := nCount + pCount + h.ZeroCount + if math.IsNaN(h.Sum) { + if sumOfBuckets > h.Count { + return errors.Wrap( + storage.ErrHistogramCountNotBigEnough, + fmt.Sprintf("%d observations found in buckets, but the Count field is %d", sumOfBuckets, h.Count), + ) + } + } else { + if sumOfBuckets != h.Count { + return errors.Wrap( + storage.ErrHistogramCountMismatch, + fmt.Sprintf("%d observations found in buckets, but the Count field is %d", sumOfBuckets, h.Count), + ) + } } return nil diff --git a/tsdb/head_test.go b/tsdb/head_test.go index edecf8dfe..2feb745f1 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -3419,7 +3419,6 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { hists = tsdbutil.GenerateTestHistograms(numHistograms) } for _, h := range hists { - h.Count *= 2 h.NegativeSpans = h.PositiveSpans h.NegativeBuckets = h.PositiveBuckets _, err := app.AppendHistogram(0, s1, ts, h, nil) @@ -3442,7 +3441,6 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { hists = tsdbutil.GenerateTestFloatHistograms(numHistograms) } for _, h := range hists { - h.Count *= 2 h.NegativeSpans = h.PositiveSpans h.NegativeBuckets = h.PositiveBuckets _, err := app.AppendHistogram(0, s1, ts, nil, h) @@ -3484,7 +3482,6 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { } for _, h := range hists { ts++ - 
h.Count *= 2 h.NegativeSpans = h.PositiveSpans h.NegativeBuckets = h.PositiveBuckets _, err := app.AppendHistogram(0, s2, ts, h, nil) @@ -3521,7 +3518,6 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { } for _, h := range hists { ts++ - h.Count *= 2 h.NegativeSpans = h.PositiveSpans h.NegativeBuckets = h.PositiveBuckets _, err := app.AppendHistogram(0, s2, ts, nil, h) @@ -4907,7 +4903,7 @@ func TestHistogramValidation(t *testing.T) { "valid histogram": { h: tsdbutil.GenerateTestHistograms(1)[0], }, - "valid histogram that has its Count (4) higher than the actual total of buckets (2 + 1)": { + "valid histogram with NaN observations that has its Count (4) higher than the actual total of buckets (2 + 1)": { // This case is possible if NaN values (which do not fall into any bucket) are observed. h: &histogram.Histogram{ ZeroCount: 2, @@ -4917,6 +4913,17 @@ func TestHistogramValidation(t *testing.T) { PositiveBuckets: []int64{1}, }, }, + "rejects histogram without NaN observations that has its Count (4) higher than the actual total of buckets (2 + 1)": { + h: &histogram.Histogram{ + ZeroCount: 2, + Count: 4, + Sum: 333, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{1}, + }, + errMsg: `3 observations found in buckets, but the Count field is 4: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`, + skipFloat: true, + }, "rejects histogram that has too few negative buckets": { h: &histogram.Histogram{ NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}}, @@ -4981,7 +4988,7 @@ func TestHistogramValidation(t *testing.T) { NegativeBuckets: []int64{1}, PositiveBuckets: []int64{1}, }, - errMsg: `2 observations found in buckets, but the Count field is 0: histogram's observation count should be at least the number of observations found in the buckets`, + errMsg: `2 observations found in buckets, but the Count field is 0: histogram's observation count should equal the 
number of observations found in the buckets (in absence of NaN)`, skipFloat: true, }, "rejects a histogram that doesn't count the zero bucket in its count": { @@ -4993,7 +5000,7 @@ func TestHistogramValidation(t *testing.T) { NegativeBuckets: []int64{1}, PositiveBuckets: []int64{1}, }, - errMsg: `3 observations found in buckets, but the Count field is 2: histogram's observation count should be at least the number of observations found in the buckets`, + errMsg: `3 observations found in buckets, but the Count field is 2: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`, skipFloat: true, }, } @@ -5029,8 +5036,8 @@ func generateBigTestHistograms(numHistograms, numBuckets int) []*histogram.Histo numSpans := numBuckets / 10 bucketsPerSide := numBuckets / 2 spanLength := uint32(bucketsPerSide / numSpans) - // Given all bucket deltas are 1, sum numHistograms + 1. - observationCount := numBuckets / 2 * (1 + numBuckets) + // Given all bucket deltas are 1, sum bucketsPerSide + 1. 
+ observationCount := bucketsPerSide * (1 + bucketsPerSide) var histograms []*histogram.Histogram for i := 0; i < numHistograms; i++ { @@ -5491,14 +5498,13 @@ func TestCuttingNewHeadChunks(t *testing.T) { numSamples int numBytes int }{ - {30, 696}, - {30, 700}, - {30, 708}, - {30, 693}, + {40, 896}, + {40, 899}, + {40, 896}, + {30, 690}, {30, 691}, - {30, 692}, - {30, 695}, {30, 694}, + {30, 693}, }, }, "really large histograms": { From 1f8aea11d6f1a8b12a89011c1bb17c400454f0b4 Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Fri, 13 Oct 2023 10:58:48 +0300 Subject: [PATCH 547/632] Move histogram validation code to model/histogram Signed-off-by: Linas Medziunas --- model/histogram/test_utils.go | 52 +++++++++ model/histogram/validate.go | 136 +++++++++++++++++++++++ model/histogram/validate_test.go | 175 +++++++++++++++++++++++++++++ storage/interface.go | 17 +-- tsdb/agent/db.go | 4 +- tsdb/head_append.go | 111 +------------------ tsdb/head_test.go | 183 +------------------------------ 7 files changed, 377 insertions(+), 301 deletions(-) create mode 100644 model/histogram/test_utils.go create mode 100644 model/histogram/validate.go create mode 100644 model/histogram/validate_test.go diff --git a/model/histogram/test_utils.go b/model/histogram/test_utils.go new file mode 100644 index 000000000..9e9a711c2 --- /dev/null +++ b/model/histogram/test_utils.go @@ -0,0 +1,52 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package histogram + +// GenerateBigTestHistograms generates a slice of histograms with given number of buckets each. +func GenerateBigTestHistograms(numHistograms, numBuckets int) []*Histogram { + numSpans := numBuckets / 10 + bucketsPerSide := numBuckets / 2 + spanLength := uint32(bucketsPerSide / numSpans) + // Given all bucket deltas are 1, sum bucketsPerSide + 1. + observationCount := bucketsPerSide * (1 + bucketsPerSide) + + var histograms []*Histogram + for i := 0; i < numHistograms; i++ { + h := &Histogram{ + Count: uint64(i + observationCount), + ZeroCount: uint64(i), + ZeroThreshold: 1e-128, + Sum: 18.4 * float64(i+1), + Schema: 2, + NegativeSpans: make([]Span, numSpans), + PositiveSpans: make([]Span, numSpans), + NegativeBuckets: make([]int64, bucketsPerSide), + PositiveBuckets: make([]int64, bucketsPerSide), + } + + for j := 0; j < numSpans; j++ { + s := Span{Offset: 1, Length: spanLength} + h.NegativeSpans[j] = s + h.PositiveSpans[j] = s + } + + for j := 0; j < bucketsPerSide; j++ { + h.NegativeBuckets[j] = 1 + h.PositiveBuckets[j] = 1 + } + + histograms = append(histograms, h) + } + return histograms +} diff --git a/model/histogram/validate.go b/model/histogram/validate.go new file mode 100644 index 000000000..41649b798 --- /dev/null +++ b/model/histogram/validate.go @@ -0,0 +1,136 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package histogram + +import ( + "fmt" + "math" + + "github.com/pkg/errors" +) + +var ( + ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets") + ErrHistogramCountMismatch = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)") + ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative") + ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative") + ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided") +) + +func ValidateHistogram(h *Histogram) error { + if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { + return errors.Wrap(err, "negative side") + } + if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return errors.Wrap(err, "positive side") + } + var nCount, pCount uint64 + err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true) + if err != nil { + return errors.Wrap(err, "negative side") + } + err = checkHistogramBuckets(h.PositiveBuckets, &pCount, true) + if err != nil { + return errors.Wrap(err, "positive side") + } + + sumOfBuckets := nCount + pCount + h.ZeroCount + if math.IsNaN(h.Sum) { + if sumOfBuckets > h.Count { + return errors.Wrap( + ErrHistogramCountNotBigEnough, + fmt.Sprintf("%d observations found in buckets, but the Count field is %d", sumOfBuckets, h.Count), + ) + } + } else { + if sumOfBuckets != h.Count { + return errors.Wrap( + ErrHistogramCountMismatch, + fmt.Sprintf("%d observations found in buckets, but the Count field is %d", sumOfBuckets, h.Count), + ) + } + } + + return nil +} + +func ValidateFloatHistogram(h *FloatHistogram) error { + if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { + return errors.Wrap(err, "negative side") + 
} + if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return errors.Wrap(err, "positive side") + } + var nCount, pCount float64 + err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false) + if err != nil { + return errors.Wrap(err, "negative side") + } + err = checkHistogramBuckets(h.PositiveBuckets, &pCount, false) + if err != nil { + return errors.Wrap(err, "positive side") + } + + // We do not check for h.Count being at least as large as the sum of the + // counts in the buckets because floating point precision issues can + // create false positives here. + + return nil +} + +func checkHistogramSpans(spans []Span, numBuckets int) error { + var spanBuckets int + for n, span := range spans { + if n > 0 && span.Offset < 0 { + return errors.Wrap( + ErrHistogramSpanNegativeOffset, + fmt.Sprintf("span number %d with offset %d", n+1, span.Offset), + ) + } + spanBuckets += int(span.Length) + } + if spanBuckets != numBuckets { + return errors.Wrap( + ErrHistogramSpansBucketsMismatch, + fmt.Sprintf("spans need %d buckets, have %d buckets", spanBuckets, numBuckets), + ) + } + return nil +} + +func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IBC, count *BC, deltas bool) error { + if len(buckets) == 0 { + return nil + } + + var last IBC + for i := 0; i < len(buckets); i++ { + var c IBC + if deltas { + c = last + buckets[i] + } else { + c = buckets[i] + } + if c < 0 { + return errors.Wrap( + ErrHistogramNegativeBucketCount, + fmt.Sprintf("bucket number %d has observation count of %v", i+1, c), + ) + } + last = c + *count += BC(c) + } + + return nil +} diff --git a/model/histogram/validate_test.go b/model/histogram/validate_test.go new file mode 100644 index 000000000..d9d8f0639 --- /dev/null +++ b/model/histogram/validate_test.go @@ -0,0 +1,175 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance 
with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package histogram + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestHistogramValidation(t *testing.T) { + tests := map[string]struct { + h *Histogram + errMsg string + skipFloat bool + }{ + "valid histogram": { + h: &Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 19.4, + Schema: 1, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + NegativeSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{1, 1, -1, 0}, + }, + }, + "valid histogram with NaN observations that has its Count (4) higher than the actual total of buckets (2 + 1)": { + // This case is possible if NaN values (which do not fall into any bucket) are observed. 
+ h: &Histogram{ + ZeroCount: 2, + Count: 4, + Sum: math.NaN(), + PositiveSpans: []Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{1}, + }, + }, + "rejects histogram without NaN observations that has its Count (4) higher than the actual total of buckets (2 + 1)": { + h: &Histogram{ + ZeroCount: 2, + Count: 4, + Sum: 333, + PositiveSpans: []Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{1}, + }, + errMsg: `3 observations found in buckets, but the Count field is 4: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`, + skipFloat: true, + }, + "rejects histogram that has too few negative buckets": { + h: &Histogram{ + NegativeSpans: []Span{{Offset: 0, Length: 1}}, + NegativeBuckets: []int64{}, + }, + errMsg: `negative side: spans need 1 buckets, have 0 buckets: histogram spans specify different number of buckets than provided`, + }, + "rejects histogram that has too few positive buckets": { + h: &Histogram{ + PositiveSpans: []Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{}, + }, + errMsg: `positive side: spans need 1 buckets, have 0 buckets: histogram spans specify different number of buckets than provided`, + }, + "rejects histogram that has too many negative buckets": { + h: &Histogram{ + NegativeSpans: []Span{{Offset: 0, Length: 1}}, + NegativeBuckets: []int64{1, 2}, + }, + errMsg: `negative side: spans need 1 buckets, have 2 buckets: histogram spans specify different number of buckets than provided`, + }, + "rejects histogram that has too many positive buckets": { + h: &Histogram{ + PositiveSpans: []Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{1, 2}, + }, + errMsg: `positive side: spans need 1 buckets, have 2 buckets: histogram spans specify different number of buckets than provided`, + }, + "rejects a histogram that has a negative span with a negative offset": { + h: &Histogram{ + NegativeSpans: []Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}}, + 
NegativeBuckets: []int64{1, 2}, + }, + errMsg: `negative side: span number 2 with offset -1: histogram has a span whose offset is negative`, + }, + "rejects a histogram which has a positive span with a negative offset": { + h: &Histogram{ + PositiveSpans: []Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}}, + PositiveBuckets: []int64{1, 2}, + }, + errMsg: `positive side: span number 2 with offset -1: histogram has a span whose offset is negative`, + }, + "rejects a histogram that has a negative bucket with a negative count": { + h: &Histogram{ + NegativeSpans: []Span{{Offset: -1, Length: 1}}, + NegativeBuckets: []int64{-1}, + }, + errMsg: `negative side: bucket number 1 has observation count of -1: histogram has a bucket whose observation count is negative`, + }, + "rejects a histogram that has a positive bucket with a negative count": { + h: &Histogram{ + PositiveSpans: []Span{{Offset: -1, Length: 1}}, + PositiveBuckets: []int64{-1}, + }, + errMsg: `positive side: bucket number 1 has observation count of -1: histogram has a bucket whose observation count is negative`, + }, + "rejects a histogram that has a lower count than count in buckets": { + h: &Histogram{ + Count: 0, + NegativeSpans: []Span{{Offset: -1, Length: 1}}, + PositiveSpans: []Span{{Offset: -1, Length: 1}}, + NegativeBuckets: []int64{1}, + PositiveBuckets: []int64{1}, + }, + errMsg: `2 observations found in buckets, but the Count field is 0: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`, + skipFloat: true, + }, + "rejects a histogram that doesn't count the zero bucket in its count": { + h: &Histogram{ + Count: 2, + ZeroCount: 1, + NegativeSpans: []Span{{Offset: -1, Length: 1}}, + PositiveSpans: []Span{{Offset: -1, Length: 1}}, + NegativeBuckets: []int64{1}, + PositiveBuckets: []int64{1}, + }, + errMsg: `3 observations found in buckets, but the Count field is 2: histogram's observation count should equal the number of observations 
found in the buckets (in absence of NaN)`, + skipFloat: true, + }, + } + + for testName, tc := range tests { + t.Run(testName, func(t *testing.T) { + if err := ValidateHistogram(tc.h); tc.errMsg != "" { + require.EqualError(t, err, tc.errMsg) + } else { + require.NoError(t, err) + } + if tc.skipFloat { + return + } + if err := ValidateFloatHistogram(tc.h.ToFloat()); tc.errMsg != "" { + require.EqualError(t, err, tc.errMsg) + } else { + require.NoError(t, err) + } + }) + } +} + +func BenchmarkHistogramValidation(b *testing.B) { + histograms := GenerateBigTestHistograms(b.N, 500) + b.ResetTimer() + for _, h := range histograms { + require.NoError(b, ValidateHistogram(h)) + } +} diff --git a/storage/interface.go b/storage/interface.go index 4da152aa4..2b1b6a63e 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -37,17 +37,12 @@ var ( // ErrTooOldSample is when out of order support is enabled but the sample is outside the time window allowed. ErrTooOldSample = errors.New("too old sample") // ErrDuplicateSampleForTimestamp is when the sample has same timestamp but different value. 
- ErrDuplicateSampleForTimestamp = errors.New("duplicate sample for timestamp") - ErrOutOfOrderExemplar = errors.New("out of order exemplar") - ErrDuplicateExemplar = errors.New("duplicate exemplar") - ErrExemplarLabelLength = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength) - ErrExemplarsDisabled = fmt.Errorf("exemplar storage is disabled or max exemplars is less than or equal to 0") - ErrNativeHistogramsDisabled = fmt.Errorf("native histograms are disabled") - ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets") - ErrHistogramCountMismatch = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)") - ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative") - ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative") - ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided") + ErrDuplicateSampleForTimestamp = errors.New("duplicate sample for timestamp") + ErrOutOfOrderExemplar = errors.New("out of order exemplar") + ErrDuplicateExemplar = errors.New("duplicate exemplar") + ErrExemplarLabelLength = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength) + ErrExemplarsDisabled = fmt.Errorf("exemplar storage is disabled or max exemplars is less than or equal to 0") + ErrNativeHistogramsDisabled = fmt.Errorf("native histograms are disabled") ) // SeriesRef is a generic series reference. 
In prometheus it is either a diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 3912b9d52..188b10585 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -883,13 +883,13 @@ func (a *appender) AppendExemplar(ref storage.SeriesRef, _ labels.Labels, e exem func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { if h != nil { - if err := tsdb.ValidateHistogram(h); err != nil { + if err := histogram.ValidateHistogram(h); err != nil { return 0, err } } if fh != nil { - if err := tsdb.ValidateFloatHistogram(fh); err != nil { + if err := histogram.ValidateFloatHistogram(fh); err != nil { return 0, err } } diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 330caad78..eeaaa369f 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -521,13 +521,13 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels } if h != nil { - if err := ValidateHistogram(h); err != nil { + if err := histogram.ValidateHistogram(h); err != nil { return 0, err } } if fh != nil { - if err := ValidateFloatHistogram(fh); err != nil { + if err := histogram.ValidateFloatHistogram(fh); err != nil { return 0, err } } @@ -642,113 +642,6 @@ func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels, return ref, nil } -func ValidateHistogram(h *histogram.Histogram) error { - if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { - return errors.Wrap(err, "negative side") - } - if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { - return errors.Wrap(err, "positive side") - } - var nCount, pCount uint64 - err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true) - if err != nil { - return errors.Wrap(err, "negative side") - } - err = checkHistogramBuckets(h.PositiveBuckets, &pCount, true) - if err != nil { - return errors.Wrap(err, "positive side") - } - - 
sumOfBuckets := nCount + pCount + h.ZeroCount - if math.IsNaN(h.Sum) { - if sumOfBuckets > h.Count { - return errors.Wrap( - storage.ErrHistogramCountNotBigEnough, - fmt.Sprintf("%d observations found in buckets, but the Count field is %d", sumOfBuckets, h.Count), - ) - } - } else { - if sumOfBuckets != h.Count { - return errors.Wrap( - storage.ErrHistogramCountMismatch, - fmt.Sprintf("%d observations found in buckets, but the Count field is %d", sumOfBuckets, h.Count), - ) - } - } - - return nil -} - -func ValidateFloatHistogram(h *histogram.FloatHistogram) error { - if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { - return errors.Wrap(err, "negative side") - } - if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { - return errors.Wrap(err, "positive side") - } - var nCount, pCount float64 - err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false) - if err != nil { - return errors.Wrap(err, "negative side") - } - err = checkHistogramBuckets(h.PositiveBuckets, &pCount, false) - if err != nil { - return errors.Wrap(err, "positive side") - } - - // We do not check for h.Count being at least as large as the sum of the - // counts in the buckets because floating point precision issues can - // create false positives here. 
- - return nil -} - -func checkHistogramSpans(spans []histogram.Span, numBuckets int) error { - var spanBuckets int - for n, span := range spans { - if n > 0 && span.Offset < 0 { - return errors.Wrap( - storage.ErrHistogramSpanNegativeOffset, - fmt.Sprintf("span number %d with offset %d", n+1, span.Offset), - ) - } - spanBuckets += int(span.Length) - } - if spanBuckets != numBuckets { - return errors.Wrap( - storage.ErrHistogramSpansBucketsMismatch, - fmt.Sprintf("spans need %d buckets, have %d buckets", spanBuckets, numBuckets), - ) - } - return nil -} - -func checkHistogramBuckets[BC histogram.BucketCount, IBC histogram.InternalBucketCount](buckets []IBC, count *BC, deltas bool) error { - if len(buckets) == 0 { - return nil - } - - var last IBC - for i := 0; i < len(buckets); i++ { - var c IBC - if deltas { - c = last + buckets[i] - } else { - c = buckets[i] - } - if c < 0 { - return errors.Wrap( - storage.ErrHistogramNegativeBucketCount, - fmt.Sprintf("bucket number %d has observation count of %v", i+1, c), - ) - } - last = c - *count += BC(c) - } - - return nil -} - var _ storage.GetRef = &headAppender{} func (a *headAppender) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRef, labels.Labels) { diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 2feb745f1..1216dd0a6 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -4894,181 +4894,6 @@ func TestReplayAfterMmapReplayError(t *testing.T) { require.NoError(t, h.Close()) } -func TestHistogramValidation(t *testing.T) { - tests := map[string]struct { - h *histogram.Histogram - errMsg string - skipFloat bool - }{ - "valid histogram": { - h: tsdbutil.GenerateTestHistograms(1)[0], - }, - "valid histogram with NaN observations that has its Count (4) higher than the actual total of buckets (2 + 1)": { - // This case is possible if NaN values (which do not fall into any bucket) are observed. 
- h: &histogram.Histogram{ - ZeroCount: 2, - Count: 4, - Sum: math.NaN(), - PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, - PositiveBuckets: []int64{1}, - }, - }, - "rejects histogram without NaN observations that has its Count (4) higher than the actual total of buckets (2 + 1)": { - h: &histogram.Histogram{ - ZeroCount: 2, - Count: 4, - Sum: 333, - PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, - PositiveBuckets: []int64{1}, - }, - errMsg: `3 observations found in buckets, but the Count field is 4: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`, - skipFloat: true, - }, - "rejects histogram that has too few negative buckets": { - h: &histogram.Histogram{ - NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}}, - NegativeBuckets: []int64{}, - }, - errMsg: `negative side: spans need 1 buckets, have 0 buckets: histogram spans specify different number of buckets than provided`, - }, - "rejects histogram that has too few positive buckets": { - h: &histogram.Histogram{ - PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, - PositiveBuckets: []int64{}, - }, - errMsg: `positive side: spans need 1 buckets, have 0 buckets: histogram spans specify different number of buckets than provided`, - }, - "rejects histogram that has too many negative buckets": { - h: &histogram.Histogram{ - NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}}, - NegativeBuckets: []int64{1, 2}, - }, - errMsg: `negative side: spans need 1 buckets, have 2 buckets: histogram spans specify different number of buckets than provided`, - }, - "rejects histogram that has too many positive buckets": { - h: &histogram.Histogram{ - PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, - PositiveBuckets: []int64{1, 2}, - }, - errMsg: `positive side: spans need 1 buckets, have 2 buckets: histogram spans specify different number of buckets than provided`, - }, - "rejects a histogram that has a negative span with a 
negative offset": { - h: &histogram.Histogram{ - NegativeSpans: []histogram.Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}}, - NegativeBuckets: []int64{1, 2}, - }, - errMsg: `negative side: span number 2 with offset -1: histogram has a span whose offset is negative`, - }, - "rejects a histogram which has a positive span with a negative offset": { - h: &histogram.Histogram{ - PositiveSpans: []histogram.Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}}, - PositiveBuckets: []int64{1, 2}, - }, - errMsg: `positive side: span number 2 with offset -1: histogram has a span whose offset is negative`, - }, - "rejects a histogram that has a negative bucket with a negative count": { - h: &histogram.Histogram{ - NegativeSpans: []histogram.Span{{Offset: -1, Length: 1}}, - NegativeBuckets: []int64{-1}, - }, - errMsg: `negative side: bucket number 1 has observation count of -1: histogram has a bucket whose observation count is negative`, - }, - "rejects a histogram that has a positive bucket with a negative count": { - h: &histogram.Histogram{ - PositiveSpans: []histogram.Span{{Offset: -1, Length: 1}}, - PositiveBuckets: []int64{-1}, - }, - errMsg: `positive side: bucket number 1 has observation count of -1: histogram has a bucket whose observation count is negative`, - }, - "rejects a histogram that has a lower count than count in buckets": { - h: &histogram.Histogram{ - Count: 0, - NegativeSpans: []histogram.Span{{Offset: -1, Length: 1}}, - PositiveSpans: []histogram.Span{{Offset: -1, Length: 1}}, - NegativeBuckets: []int64{1}, - PositiveBuckets: []int64{1}, - }, - errMsg: `2 observations found in buckets, but the Count field is 0: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`, - skipFloat: true, - }, - "rejects a histogram that doesn't count the zero bucket in its count": { - h: &histogram.Histogram{ - Count: 2, - ZeroCount: 1, - NegativeSpans: []histogram.Span{{Offset: -1, Length: 1}}, - 
PositiveSpans: []histogram.Span{{Offset: -1, Length: 1}}, - NegativeBuckets: []int64{1}, - PositiveBuckets: []int64{1}, - }, - errMsg: `3 observations found in buckets, but the Count field is 2: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`, - skipFloat: true, - }, - } - - for testName, tc := range tests { - t.Run(testName, func(t *testing.T) { - if err := ValidateHistogram(tc.h); tc.errMsg != "" { - require.EqualError(t, err, tc.errMsg) - } else { - require.NoError(t, err) - } - if tc.skipFloat { - return - } - if err := ValidateFloatHistogram(tc.h.ToFloat()); tc.errMsg != "" { - require.EqualError(t, err, tc.errMsg) - } else { - require.NoError(t, err) - } - }) - } -} - -func BenchmarkHistogramValidation(b *testing.B) { - histograms := generateBigTestHistograms(b.N, 500) - b.ResetTimer() - for _, h := range histograms { - require.NoError(b, ValidateHistogram(h)) - } -} - -func generateBigTestHistograms(numHistograms, numBuckets int) []*histogram.Histogram { - numSpans := numBuckets / 10 - bucketsPerSide := numBuckets / 2 - spanLength := uint32(bucketsPerSide / numSpans) - // Given all bucket deltas are 1, sum bucketsPerSide + 1. 
- observationCount := bucketsPerSide * (1 + bucketsPerSide) - - var histograms []*histogram.Histogram - for i := 0; i < numHistograms; i++ { - h := &histogram.Histogram{ - Count: uint64(i + observationCount), - ZeroCount: uint64(i), - ZeroThreshold: 1e-128, - Sum: 18.4 * float64(i+1), - Schema: 2, - NegativeSpans: make([]histogram.Span, numSpans), - PositiveSpans: make([]histogram.Span, numSpans), - NegativeBuckets: make([]int64, bucketsPerSide), - PositiveBuckets: make([]int64, bucketsPerSide), - } - - for j := 0; j < numSpans; j++ { - s := histogram.Span{Offset: 1, Length: spanLength} - h.NegativeSpans[j] = s - h.PositiveSpans[j] = s - } - - for j := 0; j < bucketsPerSide; j++ { - h.NegativeBuckets[j] = 1 - h.PositiveBuckets[j] = 1 - } - - histograms = append(histograms, h) - } - return histograms -} - func TestOOOAppendWithNoSeries(t *testing.T) { dir := t.TempDir() wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy) @@ -5409,7 +5234,7 @@ func BenchmarkCuttingHeadHistogramChunks(b *testing.B) { numSamples = 50000 numBuckets = 100 ) - samples := generateBigTestHistograms(numSamples, numBuckets) + samples := histogram.GenerateBigTestHistograms(numSamples, numBuckets) h, _ := newTestHead(b, DefaultBlockDuration, wlog.CompressionNone, false) defer func() { @@ -5473,7 +5298,7 @@ func TestCuttingNewHeadChunks(t *testing.T) { "small histograms": { numTotalSamples: 240, histValFunc: func() func(i int) *histogram.Histogram { - hists := generateBigTestHistograms(240, 10) + hists := histogram.GenerateBigTestHistograms(240, 10) return func(i int) *histogram.Histogram { return hists[i] } @@ -5489,7 +5314,7 @@ func TestCuttingNewHeadChunks(t *testing.T) { "large histograms": { numTotalSamples: 240, histValFunc: func() func(i int) *histogram.Histogram { - hists := generateBigTestHistograms(240, 100) + hists := histogram.GenerateBigTestHistograms(240, 100) return func(i int) *histogram.Histogram { return hists[i] } @@ -5512,7 +5337,7 @@ 
func TestCuttingNewHeadChunks(t *testing.T) { // per chunk. numTotalSamples: 11, histValFunc: func() func(i int) *histogram.Histogram { - hists := generateBigTestHistograms(11, 100000) + hists := histogram.GenerateBigTestHistograms(11, 100000) return func(i int) *histogram.Histogram { return hists[i] } From ebed7d0612dca43e3ef89f00c3882b58463d21ab Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Fri, 3 Nov 2023 16:47:59 +0200 Subject: [PATCH 548/632] Change Validate to be a method on histogram structs Signed-off-by: Linas Medziunas --- model/histogram/float_histogram.go | 27 +++++ model/histogram/generic.go | 56 +++++++++ model/histogram/histogram.go | 45 ++++++++ model/histogram/histogram_test.go | 156 +++++++++++++++++++++++++ model/histogram/validate.go | 136 ---------------------- model/histogram/validate_test.go | 175 ----------------------------- tsdb/agent/db.go | 4 +- tsdb/head_append.go | 4 +- 8 files changed, 288 insertions(+), 315 deletions(-) delete mode 100644 model/histogram/validate.go delete mode 100644 model/histogram/validate_test.go diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index b519cbc58..fd6c2560f 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -17,6 +17,8 @@ import ( "fmt" "math" "strings" + + "github.com/pkg/errors" ) // FloatHistogram is similar to Histogram but uses float64 for all @@ -593,6 +595,31 @@ func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] { } } +// Validate validates consistency between span and bucket slices. Also, buckets are checked +// against negative values. +// We do not check for h.Count being at least as large as the sum of the +// counts in the buckets because floating point precision issues can +// create false positives here. 
+func (h *FloatHistogram) Validate() error { + if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { + return errors.Wrap(err, "negative side") + } + if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return errors.Wrap(err, "positive side") + } + var nCount, pCount float64 + err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false) + if err != nil { + return errors.Wrap(err, "negative side") + } + err = checkHistogramBuckets(h.PositiveBuckets, &pCount, false) + if err != nil { + return errors.Wrap(err, "positive side") + } + + return nil +} + // zeroCountForLargerThreshold returns what the histogram's zero count would be // if the ZeroThreshold had the provided larger (or equal) value. If the // provided value is less than the histogram's ZeroThreshold, the method panics. diff --git a/model/histogram/generic.go b/model/histogram/generic.go index 3c1ad7cc8..22048c44e 100644 --- a/model/histogram/generic.go +++ b/model/histogram/generic.go @@ -17,6 +17,16 @@ import ( "fmt" "math" "strings" + + "github.com/pkg/errors" +) + +var ( + ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets") + ErrHistogramCountMismatch = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)") + ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative") + ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative") + ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided") ) // BucketCount is a type constraint for the count in a bucket, which can be @@ -347,6 +357,52 @@ func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmp return buckets, spans } +func checkHistogramSpans(spans []Span, numBuckets int) 
error { + var spanBuckets int + for n, span := range spans { + if n > 0 && span.Offset < 0 { + return errors.Wrap( + ErrHistogramSpanNegativeOffset, + fmt.Sprintf("span number %d with offset %d", n+1, span.Offset), + ) + } + spanBuckets += int(span.Length) + } + if spanBuckets != numBuckets { + return errors.Wrap( + ErrHistogramSpansBucketsMismatch, + fmt.Sprintf("spans need %d buckets, have %d buckets", spanBuckets, numBuckets), + ) + } + return nil +} + +func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IBC, count *BC, deltas bool) error { + if len(buckets) == 0 { + return nil + } + + var last IBC + for i := 0; i < len(buckets); i++ { + var c IBC + if deltas { + c = last + buckets[i] + } else { + c = buckets[i] + } + if c < 0 { + return errors.Wrap( + ErrHistogramNegativeBucketCount, + fmt.Sprintf("bucket number %d has observation count of %v", i+1, c), + ) + } + last = c + *count += BC(c) + } + + return nil +} + func getBound(idx, schema int32) float64 { // Here a bit of context about the behavior for the last bucket counting // regular numbers (called simply "last bucket" below) and the bucket diff --git a/model/histogram/histogram.go b/model/histogram/histogram.go index 762e7de81..fa624841e 100644 --- a/model/histogram/histogram.go +++ b/model/histogram/histogram.go @@ -18,6 +18,7 @@ import ( "math" "strings" + "github.com/pkg/errors" "golang.org/x/exp/slices" ) @@ -328,6 +329,50 @@ func (h *Histogram) ToFloat() *FloatHistogram { } } +// Validate validates consistency between span and bucket slices. Also, buckets are checked +// against negative values. +// For histograms that have not observed any NaN values (based on IsNaN(h.Sum) check), a +// strict h.Count = nCount + pCount + h.ZeroCount check is performed. +// Otherwise, only a lower bound check will be done (h.Count >= nCount + pCount + h.ZeroCount), +// because NaN observations do not increment the values of buckets (but they do increment +// the total h.Count). 
+func (h *Histogram) Validate() error { + if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { + return errors.Wrap(err, "negative side") + } + if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return errors.Wrap(err, "positive side") + } + var nCount, pCount uint64 + err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true) + if err != nil { + return errors.Wrap(err, "negative side") + } + err = checkHistogramBuckets(h.PositiveBuckets, &pCount, true) + if err != nil { + return errors.Wrap(err, "positive side") + } + + sumOfBuckets := nCount + pCount + h.ZeroCount + if math.IsNaN(h.Sum) { + if sumOfBuckets > h.Count { + return errors.Wrap( + ErrHistogramCountNotBigEnough, + fmt.Sprintf("%d observations found in buckets, but the Count field is %d", sumOfBuckets, h.Count), + ) + } + } else { + if sumOfBuckets != h.Count { + return errors.Wrap( + ErrHistogramCountMismatch, + fmt.Sprintf("%d observations found in buckets, but the Count field is %d", sumOfBuckets, h.Count), + ) + } + } + + return nil +} + type regularBucketIterator struct { baseBucketIterator[uint64, int64] } diff --git a/model/histogram/histogram_test.go b/model/histogram/histogram_test.go index 23fb1779e..6f12f53e8 100644 --- a/model/histogram/histogram_test.go +++ b/model/histogram/histogram_test.go @@ -811,3 +811,159 @@ func TestHistogramCompact(t *testing.T) { }) } } + +func TestHistogramValidation(t *testing.T) { + tests := map[string]struct { + h *Histogram + errMsg string + skipFloat bool + }{ + "valid histogram": { + h: &Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 19.4, + Schema: 1, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + NegativeSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{1, 1, -1, 0}, + }, + }, + "valid histogram with NaN observations that has its Count 
(4) higher than the actual total of buckets (2 + 1)": { + // This case is possible if NaN values (which do not fall into any bucket) are observed. + h: &Histogram{ + ZeroCount: 2, + Count: 4, + Sum: math.NaN(), + PositiveSpans: []Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{1}, + }, + }, + "rejects histogram without NaN observations that has its Count (4) higher than the actual total of buckets (2 + 1)": { + h: &Histogram{ + ZeroCount: 2, + Count: 4, + Sum: 333, + PositiveSpans: []Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{1}, + }, + errMsg: `3 observations found in buckets, but the Count field is 4: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`, + skipFloat: true, + }, + "rejects histogram that has too few negative buckets": { + h: &Histogram{ + NegativeSpans: []Span{{Offset: 0, Length: 1}}, + NegativeBuckets: []int64{}, + }, + errMsg: `negative side: spans need 1 buckets, have 0 buckets: histogram spans specify different number of buckets than provided`, + }, + "rejects histogram that has too few positive buckets": { + h: &Histogram{ + PositiveSpans: []Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{}, + }, + errMsg: `positive side: spans need 1 buckets, have 0 buckets: histogram spans specify different number of buckets than provided`, + }, + "rejects histogram that has too many negative buckets": { + h: &Histogram{ + NegativeSpans: []Span{{Offset: 0, Length: 1}}, + NegativeBuckets: []int64{1, 2}, + }, + errMsg: `negative side: spans need 1 buckets, have 2 buckets: histogram spans specify different number of buckets than provided`, + }, + "rejects histogram that has too many positive buckets": { + h: &Histogram{ + PositiveSpans: []Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{1, 2}, + }, + errMsg: `positive side: spans need 1 buckets, have 2 buckets: histogram spans specify different number of buckets than provided`, + }, + "rejects a histogram that 
has a negative span with a negative offset": { + h: &Histogram{ + NegativeSpans: []Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}}, + NegativeBuckets: []int64{1, 2}, + }, + errMsg: `negative side: span number 2 with offset -1: histogram has a span whose offset is negative`, + }, + "rejects a histogram which has a positive span with a negative offset": { + h: &Histogram{ + PositiveSpans: []Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}}, + PositiveBuckets: []int64{1, 2}, + }, + errMsg: `positive side: span number 2 with offset -1: histogram has a span whose offset is negative`, + }, + "rejects a histogram that has a negative bucket with a negative count": { + h: &Histogram{ + NegativeSpans: []Span{{Offset: -1, Length: 1}}, + NegativeBuckets: []int64{-1}, + }, + errMsg: `negative side: bucket number 1 has observation count of -1: histogram has a bucket whose observation count is negative`, + }, + "rejects a histogram that has a positive bucket with a negative count": { + h: &Histogram{ + PositiveSpans: []Span{{Offset: -1, Length: 1}}, + PositiveBuckets: []int64{-1}, + }, + errMsg: `positive side: bucket number 1 has observation count of -1: histogram has a bucket whose observation count is negative`, + }, + "rejects a histogram that has a lower count than count in buckets": { + h: &Histogram{ + Count: 0, + NegativeSpans: []Span{{Offset: -1, Length: 1}}, + PositiveSpans: []Span{{Offset: -1, Length: 1}}, + NegativeBuckets: []int64{1}, + PositiveBuckets: []int64{1}, + }, + errMsg: `2 observations found in buckets, but the Count field is 0: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`, + skipFloat: true, + }, + "rejects a histogram that doesn't count the zero bucket in its count": { + h: &Histogram{ + Count: 2, + ZeroCount: 1, + NegativeSpans: []Span{{Offset: -1, Length: 1}}, + PositiveSpans: []Span{{Offset: -1, Length: 1}}, + NegativeBuckets: []int64{1}, + PositiveBuckets: []int64{1}, + }, 
+ errMsg: `3 observations found in buckets, but the Count field is 2: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`, + skipFloat: true, + }, + } + + for testName, tc := range tests { + t.Run(testName, func(t *testing.T) { + if err := tc.h.Validate(); tc.errMsg != "" { + require.EqualError(t, err, tc.errMsg) + } else { + require.NoError(t, err) + } + if tc.skipFloat { + return + } + + fh := tc.h.ToFloat() + if err := fh.Validate(); tc.errMsg != "" { + require.EqualError(t, err, tc.errMsg) + } else { + require.NoError(t, err) + } + }) + } +} + +func BenchmarkHistogramValidation(b *testing.B) { + histograms := GenerateBigTestHistograms(b.N, 500) + b.ResetTimer() + for _, h := range histograms { + require.NoError(b, h.Validate()) + } +} diff --git a/model/histogram/validate.go b/model/histogram/validate.go deleted file mode 100644 index 41649b798..000000000 --- a/model/histogram/validate.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package histogram - -import ( - "fmt" - "math" - - "github.com/pkg/errors" -) - -var ( - ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets") - ErrHistogramCountMismatch = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)") - ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative") - ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative") - ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided") -) - -func ValidateHistogram(h *Histogram) error { - if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { - return errors.Wrap(err, "negative side") - } - if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { - return errors.Wrap(err, "positive side") - } - var nCount, pCount uint64 - err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true) - if err != nil { - return errors.Wrap(err, "negative side") - } - err = checkHistogramBuckets(h.PositiveBuckets, &pCount, true) - if err != nil { - return errors.Wrap(err, "positive side") - } - - sumOfBuckets := nCount + pCount + h.ZeroCount - if math.IsNaN(h.Sum) { - if sumOfBuckets > h.Count { - return errors.Wrap( - ErrHistogramCountNotBigEnough, - fmt.Sprintf("%d observations found in buckets, but the Count field is %d", sumOfBuckets, h.Count), - ) - } - } else { - if sumOfBuckets != h.Count { - return errors.Wrap( - ErrHistogramCountMismatch, - fmt.Sprintf("%d observations found in buckets, but the Count field is %d", sumOfBuckets, h.Count), - ) - } - } - - return nil -} - -func ValidateFloatHistogram(h *FloatHistogram) error { - if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { - return errors.Wrap(err, "negative side") - 
} - if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { - return errors.Wrap(err, "positive side") - } - var nCount, pCount float64 - err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false) - if err != nil { - return errors.Wrap(err, "negative side") - } - err = checkHistogramBuckets(h.PositiveBuckets, &pCount, false) - if err != nil { - return errors.Wrap(err, "positive side") - } - - // We do not check for h.Count being at least as large as the sum of the - // counts in the buckets because floating point precision issues can - // create false positives here. - - return nil -} - -func checkHistogramSpans(spans []Span, numBuckets int) error { - var spanBuckets int - for n, span := range spans { - if n > 0 && span.Offset < 0 { - return errors.Wrap( - ErrHistogramSpanNegativeOffset, - fmt.Sprintf("span number %d with offset %d", n+1, span.Offset), - ) - } - spanBuckets += int(span.Length) - } - if spanBuckets != numBuckets { - return errors.Wrap( - ErrHistogramSpansBucketsMismatch, - fmt.Sprintf("spans need %d buckets, have %d buckets", spanBuckets, numBuckets), - ) - } - return nil -} - -func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IBC, count *BC, deltas bool) error { - if len(buckets) == 0 { - return nil - } - - var last IBC - for i := 0; i < len(buckets); i++ { - var c IBC - if deltas { - c = last + buckets[i] - } else { - c = buckets[i] - } - if c < 0 { - return errors.Wrap( - ErrHistogramNegativeBucketCount, - fmt.Sprintf("bucket number %d has observation count of %v", i+1, c), - ) - } - last = c - *count += BC(c) - } - - return nil -} diff --git a/model/histogram/validate_test.go b/model/histogram/validate_test.go deleted file mode 100644 index d9d8f0639..000000000 --- a/model/histogram/validate_test.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in 
compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package histogram - -import ( - "math" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestHistogramValidation(t *testing.T) { - tests := map[string]struct { - h *Histogram - errMsg string - skipFloat bool - }{ - "valid histogram": { - h: &Histogram{ - Count: 12, - ZeroCount: 2, - ZeroThreshold: 0.001, - Sum: 19.4, - Schema: 1, - PositiveSpans: []Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{1, 1, -1, 0}, - NegativeSpans: []Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{1, 1, -1, 0}, - }, - }, - "valid histogram with NaN observations that has its Count (4) higher than the actual total of buckets (2 + 1)": { - // This case is possible if NaN values (which do not fall into any bucket) are observed. 
- h: &Histogram{ - ZeroCount: 2, - Count: 4, - Sum: math.NaN(), - PositiveSpans: []Span{{Offset: 0, Length: 1}}, - PositiveBuckets: []int64{1}, - }, - }, - "rejects histogram without NaN observations that has its Count (4) higher than the actual total of buckets (2 + 1)": { - h: &Histogram{ - ZeroCount: 2, - Count: 4, - Sum: 333, - PositiveSpans: []Span{{Offset: 0, Length: 1}}, - PositiveBuckets: []int64{1}, - }, - errMsg: `3 observations found in buckets, but the Count field is 4: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`, - skipFloat: true, - }, - "rejects histogram that has too few negative buckets": { - h: &Histogram{ - NegativeSpans: []Span{{Offset: 0, Length: 1}}, - NegativeBuckets: []int64{}, - }, - errMsg: `negative side: spans need 1 buckets, have 0 buckets: histogram spans specify different number of buckets than provided`, - }, - "rejects histogram that has too few positive buckets": { - h: &Histogram{ - PositiveSpans: []Span{{Offset: 0, Length: 1}}, - PositiveBuckets: []int64{}, - }, - errMsg: `positive side: spans need 1 buckets, have 0 buckets: histogram spans specify different number of buckets than provided`, - }, - "rejects histogram that has too many negative buckets": { - h: &Histogram{ - NegativeSpans: []Span{{Offset: 0, Length: 1}}, - NegativeBuckets: []int64{1, 2}, - }, - errMsg: `negative side: spans need 1 buckets, have 2 buckets: histogram spans specify different number of buckets than provided`, - }, - "rejects histogram that has too many positive buckets": { - h: &Histogram{ - PositiveSpans: []Span{{Offset: 0, Length: 1}}, - PositiveBuckets: []int64{1, 2}, - }, - errMsg: `positive side: spans need 1 buckets, have 2 buckets: histogram spans specify different number of buckets than provided`, - }, - "rejects a histogram that has a negative span with a negative offset": { - h: &Histogram{ - NegativeSpans: []Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}}, - 
NegativeBuckets: []int64{1, 2}, - }, - errMsg: `negative side: span number 2 with offset -1: histogram has a span whose offset is negative`, - }, - "rejects a histogram which has a positive span with a negative offset": { - h: &Histogram{ - PositiveSpans: []Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}}, - PositiveBuckets: []int64{1, 2}, - }, - errMsg: `positive side: span number 2 with offset -1: histogram has a span whose offset is negative`, - }, - "rejects a histogram that has a negative bucket with a negative count": { - h: &Histogram{ - NegativeSpans: []Span{{Offset: -1, Length: 1}}, - NegativeBuckets: []int64{-1}, - }, - errMsg: `negative side: bucket number 1 has observation count of -1: histogram has a bucket whose observation count is negative`, - }, - "rejects a histogram that has a positive bucket with a negative count": { - h: &Histogram{ - PositiveSpans: []Span{{Offset: -1, Length: 1}}, - PositiveBuckets: []int64{-1}, - }, - errMsg: `positive side: bucket number 1 has observation count of -1: histogram has a bucket whose observation count is negative`, - }, - "rejects a histogram that has a lower count than count in buckets": { - h: &Histogram{ - Count: 0, - NegativeSpans: []Span{{Offset: -1, Length: 1}}, - PositiveSpans: []Span{{Offset: -1, Length: 1}}, - NegativeBuckets: []int64{1}, - PositiveBuckets: []int64{1}, - }, - errMsg: `2 observations found in buckets, but the Count field is 0: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`, - skipFloat: true, - }, - "rejects a histogram that doesn't count the zero bucket in its count": { - h: &Histogram{ - Count: 2, - ZeroCount: 1, - NegativeSpans: []Span{{Offset: -1, Length: 1}}, - PositiveSpans: []Span{{Offset: -1, Length: 1}}, - NegativeBuckets: []int64{1}, - PositiveBuckets: []int64{1}, - }, - errMsg: `3 observations found in buckets, but the Count field is 2: histogram's observation count should equal the number of observations 
found in the buckets (in absence of NaN)`, - skipFloat: true, - }, - } - - for testName, tc := range tests { - t.Run(testName, func(t *testing.T) { - if err := ValidateHistogram(tc.h); tc.errMsg != "" { - require.EqualError(t, err, tc.errMsg) - } else { - require.NoError(t, err) - } - if tc.skipFloat { - return - } - if err := ValidateFloatHistogram(tc.h.ToFloat()); tc.errMsg != "" { - require.EqualError(t, err, tc.errMsg) - } else { - require.NoError(t, err) - } - }) - } -} - -func BenchmarkHistogramValidation(b *testing.B) { - histograms := GenerateBigTestHistograms(b.N, 500) - b.ResetTimer() - for _, h := range histograms { - require.NoError(b, ValidateHistogram(h)) - } -} diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 188b10585..e4d44afa2 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -883,13 +883,13 @@ func (a *appender) AppendExemplar(ref storage.SeriesRef, _ labels.Labels, e exem func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { if h != nil { - if err := histogram.ValidateHistogram(h); err != nil { + if err := h.Validate(); err != nil { return 0, err } } if fh != nil { - if err := histogram.ValidateFloatHistogram(fh); err != nil { + if err := fh.Validate(); err != nil { return 0, err } } diff --git a/tsdb/head_append.go b/tsdb/head_append.go index eeaaa369f..3663c800a 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -521,13 +521,13 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels } if h != nil { - if err := histogram.ValidateHistogram(h); err != nil { + if err := h.Validate(); err != nil { return 0, err } } if fh != nil { - if err := histogram.ValidateFloatHistogram(fh); err != nil { + if err := fh.Validate(); err != nil { return 0, err } } From 222d46d24351cd15f64636970ae888501f879a77 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Fri, 3 Nov 2023 15:34:31 -0400 Subject: 
[PATCH 549/632] Linode: Add GPU label Signed-off-by: Julien Pivotto --- discovery/linode/linode.go | 2 ++ discovery/linode/linode_test.go | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go index 63213c87b..a5e047b94 100644 --- a/discovery/linode/linode.go +++ b/discovery/linode/linode.go @@ -51,6 +51,7 @@ const ( linodeLabelStatus = linodeLabel + "status" linodeLabelTags = linodeLabel + "tags" linodeLabelGroup = linodeLabel + "group" + linodeLabelGPUs = linodeLabel + "gpus" linodeLabelHypervisor = linodeLabel + "hypervisor" linodeLabelBackups = linodeLabel + "backups" linodeLabelSpecsDiskBytes = linodeLabel + "specs_disk_bytes" @@ -302,6 +303,7 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro linodeLabelType: model.LabelValue(instance.Type), linodeLabelStatus: model.LabelValue(instance.Status), linodeLabelGroup: model.LabelValue(instance.Group), + linodeLabelGPUs: model.LabelValue(fmt.Sprintf("%d", instance.Specs.GPUs)), linodeLabelHypervisor: model.LabelValue(instance.Hypervisor), linodeLabelBackups: model.LabelValue(backupsStatus), linodeLabelSpecsDiskBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Disk)<<20)), diff --git a/discovery/linode/linode_test.go b/discovery/linode/linode_test.go index 67eb8198e..988313b70 100644 --- a/discovery/linode/linode_test.go +++ b/discovery/linode/linode_test.go @@ -85,6 +85,7 @@ func TestLinodeSDRefresh(t *testing.T) { "__meta_linode_status": model.LabelValue("running"), "__meta_linode_tags": model.LabelValue(",monitoring,"), "__meta_linode_group": model.LabelValue(""), + "__meta_linode_gpus": model.LabelValue("0"), "__meta_linode_hypervisor": model.LabelValue("kvm"), "__meta_linode_backups": model.LabelValue("disabled"), "__meta_linode_specs_disk_bytes": model.LabelValue("85899345920"), @@ -109,6 +110,7 @@ func TestLinodeSDRefresh(t *testing.T) { "__meta_linode_status": model.LabelValue("running"), 
"__meta_linode_tags": model.LabelValue(",monitoring,"), "__meta_linode_group": model.LabelValue(""), + "__meta_linode_gpus": model.LabelValue("0"), "__meta_linode_hypervisor": model.LabelValue("kvm"), "__meta_linode_backups": model.LabelValue("disabled"), "__meta_linode_specs_disk_bytes": model.LabelValue("85899345920"), @@ -132,6 +134,7 @@ func TestLinodeSDRefresh(t *testing.T) { "__meta_linode_status": model.LabelValue("running"), "__meta_linode_tags": model.LabelValue(",monitoring,"), "__meta_linode_group": model.LabelValue(""), + "__meta_linode_gpus": model.LabelValue("0"), "__meta_linode_hypervisor": model.LabelValue("kvm"), "__meta_linode_backups": model.LabelValue("disabled"), "__meta_linode_specs_disk_bytes": model.LabelValue("53687091200"), @@ -155,6 +158,7 @@ func TestLinodeSDRefresh(t *testing.T) { "__meta_linode_status": model.LabelValue("running"), "__meta_linode_tags": model.LabelValue(",monitoring,"), "__meta_linode_group": model.LabelValue(""), + "__meta_linode_gpus": model.LabelValue("0"), "__meta_linode_hypervisor": model.LabelValue("kvm"), "__meta_linode_backups": model.LabelValue("disabled"), "__meta_linode_specs_disk_bytes": model.LabelValue("26843545600"), From 75b59e0f3d2361779b008ac0cebb3d47a9f324bd Mon Sep 17 00:00:00 2001 From: SuperQ Date: Tue, 5 Sep 2023 06:27:33 +0200 Subject: [PATCH 550/632] Update golangci-lint. Update golangci-lint for Go 1.21. * Use consistent go-version syntax. 
Signed-off-by: SuperQ --- .github/workflows/ci.yml | 4 ++-- .github/workflows/codeql-analysis.yml | 2 +- scripts/golangci-lint.yml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 08fbb0a33..8b5e0e125 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -55,7 +55,7 @@ jobs: - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 - uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0 with: - go-version: '>=1.21 <1.22' + go-version: 1.21.x - run: | $TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/discovery.*|github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"} go test $TestTargets -vet=off -v @@ -143,7 +143,7 @@ jobs: uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0 with: cache: false - go-version: 1.20.x + go-version: 1.21.x - name: Install snmp_exporter/generator dependencies run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index d98233757..b5ed4a0a3 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -27,7 +27,7 @@ jobs: uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 - uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0 with: - go-version: '>=1.21 <1.22' + go-version: 1.21.x - name: Initialize CodeQL uses: github/codeql-action/init@74483a38d39275f33fcff5f35b679b5ca4a26a99 # v2.22.5 diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index babd8a0c4..ffa6b3090 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -22,7 +22,7 @@ jobs: - name: install Go uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0 with: - go-version: 1.20.x + 
go-version: 1.21.x - name: Install snmp_exporter/generator dependencies run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' From 05fba53e57cddbfaeb475e65089df922f868760f Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Wed, 8 Nov 2023 04:49:39 +0100 Subject: [PATCH 551/632] web : use Go standard package Signed-off-by: Matthieu MOREL --- .golangci.yml | 7 ---- model/histogram/float_histogram.go | 10 +++--- model/histogram/generic.go | 18 +++-------- model/histogram/histogram.go | 19 ++++------- web/api/v1/api.go | 52 ++++++++++++++---------------- web/api/v1/api_test.go | 6 ++-- web/api/v1/errors_test.go | 2 +- web/federate.go | 4 +-- web/federate_test.go | 10 +++--- web/web.go | 3 +- 10 files changed, 51 insertions(+), 80 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 2d53106c0..666d22cbe 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -41,13 +41,6 @@ issues: text: "import 'github.com/pkg/errors' is not allowed" linters: - depguard - - path: web/ - linters: - - errorlint - - path: web/ - text: "import 'github.com/pkg/errors' is not allowed" - linters: - - depguard - linters: - godot source: "^// ===" diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index fd6c2560f..22d33f5a4 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -17,8 +17,6 @@ import ( "fmt" "math" "strings" - - "github.com/pkg/errors" ) // FloatHistogram is similar to Histogram but uses float64 for all @@ -602,19 +600,19 @@ func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] { // create false positives here. 
func (h *FloatHistogram) Validate() error { if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { - return errors.Wrap(err, "negative side") + return fmt.Errorf("negative side: %w", err) } if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { - return errors.Wrap(err, "positive side") + return fmt.Errorf("positive side: %w", err) } var nCount, pCount float64 err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false) if err != nil { - return errors.Wrap(err, "negative side") + return fmt.Errorf("negative side: %w", err) } err = checkHistogramBuckets(h.PositiveBuckets, &pCount, false) if err != nil { - return errors.Wrap(err, "positive side") + return fmt.Errorf("positive side: %w", err) } return nil diff --git a/model/histogram/generic.go b/model/histogram/generic.go index 22048c44e..7e4eb1ecb 100644 --- a/model/histogram/generic.go +++ b/model/histogram/generic.go @@ -14,11 +14,10 @@ package histogram import ( + "errors" "fmt" "math" "strings" - - "github.com/pkg/errors" ) var ( @@ -361,18 +360,12 @@ func checkHistogramSpans(spans []Span, numBuckets int) error { var spanBuckets int for n, span := range spans { if n > 0 && span.Offset < 0 { - return errors.Wrap( - ErrHistogramSpanNegativeOffset, - fmt.Sprintf("span number %d with offset %d", n+1, span.Offset), - ) + return fmt.Errorf("span number %d with offset %d: %w", n+1, span.Offset, ErrHistogramSpanNegativeOffset) } spanBuckets += int(span.Length) } if spanBuckets != numBuckets { - return errors.Wrap( - ErrHistogramSpansBucketsMismatch, - fmt.Sprintf("spans need %d buckets, have %d buckets", spanBuckets, numBuckets), - ) + return fmt.Errorf("spans need %d buckets, have %d buckets: %w", spanBuckets, numBuckets, ErrHistogramSpansBucketsMismatch) } return nil } @@ -391,10 +384,7 @@ func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IB c = buckets[i] } if c < 0 { - return errors.Wrap( - ErrHistogramNegativeBucketCount, - 
fmt.Sprintf("bucket number %d has observation count of %v", i+1, c), - ) + return fmt.Errorf("bucket number %d has observation count of %v: %w", i+1, c, ErrHistogramNegativeBucketCount) } last = c *count += BC(c) diff --git a/model/histogram/histogram.go b/model/histogram/histogram.go index fa624841e..30c23e5e7 100644 --- a/model/histogram/histogram.go +++ b/model/histogram/histogram.go @@ -18,7 +18,6 @@ import ( "math" "strings" - "github.com/pkg/errors" "golang.org/x/exp/slices" ) @@ -338,35 +337,29 @@ func (h *Histogram) ToFloat() *FloatHistogram { // the total h.Count). func (h *Histogram) Validate() error { if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { - return errors.Wrap(err, "negative side") + return fmt.Errorf("negative side: %w", err) } if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { - return errors.Wrap(err, "positive side") + return fmt.Errorf("positive side: %w", err) } var nCount, pCount uint64 err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true) if err != nil { - return errors.Wrap(err, "negative side") + return fmt.Errorf("negative side: %w", err) } err = checkHistogramBuckets(h.PositiveBuckets, &pCount, true) if err != nil { - return errors.Wrap(err, "positive side") + return fmt.Errorf("positive side: %w", err) } sumOfBuckets := nCount + pCount + h.ZeroCount if math.IsNaN(h.Sum) { if sumOfBuckets > h.Count { - return errors.Wrap( - ErrHistogramCountNotBigEnough, - fmt.Sprintf("%d observations found in buckets, but the Count field is %d", sumOfBuckets, h.Count), - ) + return fmt.Errorf("%d observations found in buckets, but the Count field is %d: %w", sumOfBuckets, h.Count, ErrHistogramCountNotBigEnough) } } else { if sumOfBuckets != h.Count { - return errors.Wrap( - ErrHistogramCountMismatch, - fmt.Sprintf("%d observations found in buckets, but the Count field is %d", sumOfBuckets, h.Count), - ) + return fmt.Errorf("%d observations found in buckets, but the Count 
field is %d: %w", sumOfBuckets, h.Count, ErrHistogramCountMismatch) } } diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 6c8128f3f..34abe80aa 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -15,6 +15,7 @@ package v1 import ( "context" + "errors" "fmt" "math" "math/rand" @@ -33,7 +34,6 @@ import ( "github.com/grafana/regexp" jsoniter "github.com/json-iterator/go" "github.com/munnerz/goautoneg" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/route" @@ -317,7 +317,7 @@ func (api *API) ClearCodecs() { } func setUnavailStatusOnTSDBNotReady(r apiFuncResult) apiFuncResult { - if r.err != nil && errors.Cause(r.err.err) == tsdb.ErrNotReady { + if r.err != nil && errors.Is(r.err.err, tsdb.ErrNotReady) { r.err.typ = errorUnavailable } return r @@ -415,7 +415,7 @@ type QueryData struct { func invalidParamError(err error, parameter string) apiFuncResult { return apiFuncResult{nil, &apiError{ - errorBadData, errors.Wrapf(err, "invalid parameter %q", parameter), + errorBadData, fmt.Errorf("invalid parameter %q: %w", parameter, err), }, nil, nil} } @@ -624,17 +624,15 @@ func returnAPIError(err error) *apiError { return nil } - cause := errors.Unwrap(err) - if cause == nil { - cause = err - } - - switch cause.(type) { - case promql.ErrQueryCanceled: + var eqc promql.ErrQueryCanceled + var eqt promql.ErrQueryTimeout + var es promql.ErrStorage + switch { + case errors.As(err, &eqc): return &apiError{errorCanceled, err} - case promql.ErrQueryTimeout: + case errors.As(err, &eqt): return &apiError{errorTimeout, err} - case promql.ErrStorage: + case errors.As(err, &es): return &apiError{errorInternal, err} } @@ -709,7 +707,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { name := route.Param(ctx, "name") if !model.LabelNameRE.MatchString(name) { - return apiFuncResult{nil, &apiError{errorBadData, errors.Errorf("invalid label name: %q", name)}, nil, nil} 
+ return apiFuncResult{nil, &apiError{errorBadData, fmt.Errorf("invalid label name: %q", name)}, nil, nil} } start, err := parseTimeParam(r, "start", MinTime) @@ -797,7 +795,7 @@ func (api *API) series(r *http.Request) (result apiFuncResult) { ctx := r.Context() if err := r.ParseForm(); err != nil { - return apiFuncResult{nil, &apiError{errorBadData, errors.Wrapf(err, "error parsing form values")}, nil, nil} + return apiFuncResult{nil, &apiError{errorBadData, fmt.Errorf("error parsing form values: %w", err)}, nil, nil} } if len(r.Form["match[]"]) == 0 { return apiFuncResult{nil, &apiError{errorBadData, errors.New("no match[] parameter provided")}, nil, nil} @@ -1028,7 +1026,7 @@ func (api *API) targets(r *http.Request) apiFuncResult { case err == nil && lastErrStr == "": return "" case err != nil: - return errors.Wrapf(err, lastErrStr).Error() + return fmt.Errorf("%s: %w", lastErrStr, err).Error() default: return lastErrStr } @@ -1347,7 +1345,7 @@ type RecordingRule struct { func (api *API) rules(r *http.Request) apiFuncResult { if err := r.ParseForm(); err != nil { - return apiFuncResult{nil, &apiError{errorBadData, errors.Wrapf(err, "error parsing form values")}, nil, nil} + return apiFuncResult{nil, &apiError{errorBadData, fmt.Errorf("error parsing form values: %w", err)}, nil, nil} } queryFormToSet := func(values []string) map[string]struct{} { @@ -1367,7 +1365,7 @@ func (api *API) rules(r *http.Request) apiFuncResult { typ := strings.ToLower(r.URL.Query().Get("type")) if typ != "" && typ != "alert" && typ != "record" { - return invalidParamError(errors.Errorf("not supported value %q", typ), "type") + return invalidParamError(fmt.Errorf("not supported value %q", typ), "type") } returnAlerts := typ == "" || typ == "alert" @@ -1453,7 +1451,7 @@ func (api *API) rules(r *http.Request) apiFuncResult { Type: "recording", } default: - err := errors.Errorf("failed to assert type of rule '%v'", rule.Name()) + err := fmt.Errorf("failed to assert type of rule '%v'", 
rule.Name()) return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil} } @@ -1560,7 +1558,7 @@ func (api *API) serveTSDBStatus(r *http.Request) apiFuncResult { } metrics, err := api.gatherer.Gather() if err != nil { - return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("error gathering runtime status: %s", err)}, nil, nil} + return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("error gathering runtime status: %w", err)}, nil, nil} } chunkCount := int64(math.NaN()) for _, mF := range metrics { @@ -1636,7 +1634,7 @@ func (api *API) deleteSeries(r *http.Request) apiFuncResult { return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil} } if err := r.ParseForm(); err != nil { - return apiFuncResult{nil, &apiError{errorBadData, errors.Wrap(err, "error parsing form values")}, nil, nil} + return apiFuncResult{nil, &apiError{errorBadData, fmt.Errorf("error parsing form values: %w", err)}, nil, nil} } if len(r.Form["match[]"]) == 0 { return apiFuncResult{nil, &apiError{errorBadData, errors.New("no match[] parameter provided")}, nil, nil} @@ -1675,7 +1673,7 @@ func (api *API) snapshot(r *http.Request) apiFuncResult { if r.FormValue("skip_head") != "" { skipHead, err = strconv.ParseBool(r.FormValue("skip_head")) if err != nil { - return invalidParamError(errors.Wrapf(err, "unable to parse boolean"), "skip_head") + return invalidParamError(fmt.Errorf("unable to parse boolean: %w", err), "skip_head") } } @@ -1687,10 +1685,10 @@ func (api *API) snapshot(r *http.Request) apiFuncResult { dir = filepath.Join(snapdir, name) ) if err := os.MkdirAll(dir, 0o777); err != nil { - return apiFuncResult{nil, &apiError{errorInternal, errors.Wrap(err, "create snapshot directory")}, nil, nil} + return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("create snapshot directory: %w", err)}, nil, nil} } if err := api.db.Snapshot(dir, !skipHead); err != nil { - return apiFuncResult{nil, &apiError{errorInternal, 
errors.Wrap(err, "create snapshot")}, nil, nil} + return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("create snapshot: %w", err)}, nil, nil} } return apiFuncResult{struct { @@ -1805,7 +1803,7 @@ func parseTimeParam(r *http.Request, paramName string, defaultValue time.Time) ( } result, err := parseTime(val) if err != nil { - return time.Time{}, errors.Wrapf(err, "Invalid time value for '%s'", paramName) + return time.Time{}, fmt.Errorf("Invalid time value for '%s': %w", paramName, err) } return result, nil } @@ -1830,21 +1828,21 @@ func parseTime(s string) (time.Time, error) { case maxTimeFormatted: return MaxTime, nil } - return time.Time{}, errors.Errorf("cannot parse %q to a valid timestamp", s) + return time.Time{}, fmt.Errorf("cannot parse %q to a valid timestamp", s) } func parseDuration(s string) (time.Duration, error) { if d, err := strconv.ParseFloat(s, 64); err == nil { ts := d * float64(time.Second) if ts > float64(math.MaxInt64) || ts < float64(math.MinInt64) { - return 0, errors.Errorf("cannot parse %q to a valid duration. It overflows int64", s) + return 0, fmt.Errorf("cannot parse %q to a valid duration. 
It overflows int64", s) } return time.Duration(ts), nil } if d, err := model.ParseDuration(s); err == nil { return time.Duration(d), nil } - return 0, errors.Errorf("cannot parse %q to a valid duration", s) + return 0, fmt.Errorf("cannot parse %q to a valid duration", s) } func parseMatchersParam(matchers []string) ([][]*labels.Matcher, error) { diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 320d174fc..a5dd8640b 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -16,6 +16,7 @@ package v1 import ( "context" "encoding/json" + "errors" "fmt" "io" "net/http" @@ -33,7 +34,6 @@ import ( "github.com/prometheus/prometheus/util/stats" "github.com/go-kit/log" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -2974,7 +2974,7 @@ func (f *fakeDB) WALReplayStatus() (tsdb.WALReplayStatus, error) { } func TestAdminEndpoints(t *testing.T) { - tsdb, tsdbWithError, tsdbNotReady := &fakeDB{}, &fakeDB{err: errors.New("some error")}, &fakeDB{err: errors.Wrap(tsdb.ErrNotReady, "wrap")} + tsdb, tsdbWithError, tsdbNotReady := &fakeDB{}, &fakeDB{err: errors.New("some error")}, &fakeDB{err: fmt.Errorf("wrap: %w", tsdb.ErrNotReady)} snapshotAPI := func(api *API) apiFunc { return api.snapshot } cleanAPI := func(api *API) apiFunc { return api.cleanTombstones } deleteAPI := func(api *API) apiFunc { return api.deleteSeries } @@ -3354,7 +3354,7 @@ func TestParseTimeParam(t *testing.T) { asTime: time.Time{}, asError: func() error { _, err := parseTime("baz") - return errors.Wrapf(err, "Invalid time value for '%s'", "foo") + return fmt.Errorf("Invalid time value for '%s': %w", "foo", err) }, }, }, diff --git a/web/api/v1/errors_test.go b/web/api/v1/errors_test.go index 38ca5b62c..b6ec7d4e1 100644 --- a/web/api/v1/errors_test.go +++ b/web/api/v1/errors_test.go @@ -15,6 +15,7 @@ package v1 import ( "context" + "errors" "fmt" "net/http" 
"net/http/httptest" @@ -24,7 +25,6 @@ import ( "github.com/go-kit/log" "github.com/grafana/regexp" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/route" "github.com/stretchr/testify/require" diff --git a/web/federate.go b/web/federate.go index babc97e55..2b79d0053 100644 --- a/web/federate.go +++ b/web/federate.go @@ -14,6 +14,7 @@ package web import ( + "errors" "fmt" "net/http" "sort" @@ -21,7 +22,6 @@ import ( "github.com/go-kit/log/level" "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" @@ -86,7 +86,7 @@ func (h *Handler) federation(w http.ResponseWriter, req *http.Request) { q, err := h.localStorage.Querier(mint, maxt) if err != nil { federationErrors.Inc() - if errors.Cause(err) == tsdb.ErrNotReady { + if errors.Is(err, tsdb.ErrNotReady) { http.Error(w, err.Error(), http.StatusServiceUnavailable) return } diff --git a/web/federate_test.go b/web/federate_test.go index ab93dcf28..80539861d 100644 --- a/web/federate_test.go +++ b/web/federate_test.go @@ -16,6 +16,7 @@ package web import ( "bytes" "context" + "errors" "fmt" "io" "net/http" @@ -25,7 +26,6 @@ import ( "testing" "time" - "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -238,15 +238,15 @@ type notReadyReadStorage struct { } func (notReadyReadStorage) Querier(int64, int64) (storage.Querier, error) { - return nil, errors.Wrap(tsdb.ErrNotReady, "wrap") + return nil, fmt.Errorf("wrap: %w", tsdb.ErrNotReady) } func (notReadyReadStorage) StartTime() (int64, error) { - return 0, errors.Wrap(tsdb.ErrNotReady, "wrap") + return 0, fmt.Errorf("wrap: %w", tsdb.ErrNotReady) } func (notReadyReadStorage) Stats(string, int) (*tsdb.Stats, error) { - return nil, errors.Wrap(tsdb.ErrNotReady, "wrap") + return nil, fmt.Errorf("wrap: %w", tsdb.ErrNotReady) } // Regression 
test for https://github.com/prometheus/prometheus/issues/7181. @@ -396,7 +396,7 @@ func TestFederationWithNativeHistograms(t *testing.T) { l := labels.Labels{} for { et, err := p.Next() - if err == io.EOF { + if err != nil && errors.Is(err, io.EOF) { break } require.NoError(t, err) diff --git a/web/web.go b/web/web.go index ccf97805a..43b79c235 100644 --- a/web/web.go +++ b/web/web.go @@ -40,7 +40,6 @@ import ( "github.com/go-kit/log/level" "github.com/grafana/regexp" "github.com/mwitkow/go-conntrack" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" io_prometheus_client "github.com/prometheus/client_model/go" @@ -732,7 +731,7 @@ func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) { metrics, err := h.gatherer.Gather() if err != nil { - return status, errors.Errorf("error gathering runtime status: %s", err) + return status, fmt.Errorf("error gathering runtime status: %w", err) } for _, mF := range metrics { switch *mF.Name { From 724737006dc3122dc7487741159d24cf84b1c76e Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Wed, 8 Nov 2023 09:22:31 +0100 Subject: [PATCH 552/632] tsdb/agent: use Go standard errors package Signed-off-by: Matthieu MOREL Signed-off-by: Matthieu MOREL --- tsdb/agent/db.go | 47 ++++++++++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index e4d44afa2..66861a487 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -15,6 +15,7 @@ package agent import ( "context" + "errors" "fmt" "math" "path/filepath" @@ -24,7 +25,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "go.uber.org/atomic" @@ -263,7 +263,7 @@ func Open(l log.Logger, reg prometheus.Registerer, rs *remote.Storage, dir strin w, err := wlog.NewSize(l, reg, dir, 
opts.WALSegmentSize, opts.WALCompression) if err != nil { - return nil, errors.Wrap(err, "creating WAL") + return nil, fmt.Errorf("creating WAL: %w", err) } db := &DB{ @@ -302,7 +302,7 @@ func Open(l log.Logger, reg prometheus.Registerer, rs *remote.Storage, dir strin if err := db.replayWAL(); err != nil { level.Warn(db.logger).Log("msg", "encountered WAL read error, attempting repair", "err", err) if err := w.Repair(err); err != nil { - return nil, errors.Wrap(err, "repair corrupted WAL") + return nil, fmt.Errorf("repair corrupted WAL: %w", err) } level.Info(db.logger).Log("msg", "successfully repaired WAL") } @@ -352,7 +352,7 @@ func (db *DB) replayWAL() error { dir, startFrom, err := wlog.LastCheckpoint(db.wal.Dir()) if err != nil && err != record.ErrNotFound { - return errors.Wrap(err, "find last checkpoint") + return fmt.Errorf("find last checkpoint: %w", err) } multiRef := map[chunks.HeadSeriesRef]chunks.HeadSeriesRef{} @@ -360,7 +360,7 @@ func (db *DB) replayWAL() error { if err == nil { sr, err := wlog.NewSegmentsReader(dir) if err != nil { - return errors.Wrap(err, "open checkpoint") + return fmt.Errorf("open checkpoint: %w", err) } defer func() { if err := sr.Close(); err != nil { @@ -371,7 +371,7 @@ func (db *DB) replayWAL() error { // A corrupted checkpoint is a hard error for now and requires user // intervention. There's likely little data that can be recovered anyway. if err := db.loadWAL(wlog.NewReader(sr), multiRef); err != nil { - return errors.Wrap(err, "backfill checkpoint") + return fmt.Errorf("backfill checkpoint: %w", err) } startFrom++ level.Info(db.logger).Log("msg", "WAL checkpoint loaded") @@ -380,14 +380,14 @@ func (db *DB) replayWAL() error { // Find the last segment. _, last, err := wlog.Segments(db.wal.Dir()) if err != nil { - return errors.Wrap(err, "finding WAL segments") + return fmt.Errorf("finding WAL segments: %w", err) } // Backfil segments from the most recent checkpoint onwards. 
for i := startFrom; i <= last; i++ { seg, err := wlog.OpenReadSegment(wlog.SegmentName(db.wal.Dir(), i)) if err != nil { - return errors.Wrap(err, fmt.Sprintf("open WAL segment: %d", i)) + return fmt.Errorf("open WAL segment: %d: %w", i, err) } sr := wlog.NewSegmentBufReader(seg) @@ -432,7 +432,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H series, err = dec.Series(rec, series) if err != nil { errCh <- &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode series"), + Err: fmt.Errorf("decode series: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -444,7 +444,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H samples, err = dec.Samples(rec, samples) if err != nil { errCh <- &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode samples"), + Err: fmt.Errorf("decode samples: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -456,7 +456,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H histograms, err = dec.HistogramSamples(rec, histograms) if err != nil { errCh <- &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode histogram samples"), + Err: fmt.Errorf("decode histogram samples: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -468,7 +468,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms) if err != nil { errCh <- &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode float histogram samples"), + Err: fmt.Errorf("decode float histogram samples: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -482,7 +482,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H continue default: errCh <- &wlog.CorruptionErr{ - Err: errors.Errorf("invalid record type %v", dec.Type(rec)), + Err: fmt.Errorf("invalid record type %v", dec.Type(rec)), Segment: r.Segment(), Offset: r.Offset(), } @@ -568,7 +568,7 @@ func 
(db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H return err default: if r.Err() != nil { - return errors.Wrap(r.Err(), "read records") + return fmt.Errorf("read records: %w", r.Err()) } return nil } @@ -622,13 +622,13 @@ func (db *DB) truncate(mint int64) error { first, last, err := wlog.Segments(db.wal.Dir()) if err != nil { - return errors.Wrap(err, "get segment range") + return fmt.Errorf("get segment range: %w", err) } // Start a new segment so low ingestion volume instances don't have more WAL // than needed. if _, err := db.wal.NextSegment(); err != nil { - return errors.Wrap(err, "next segment") + return fmt.Errorf("next segment: %w", err) } last-- // Never consider most recent segment for checkpoint @@ -656,10 +656,11 @@ func (db *DB) truncate(mint int64) error { if _, err = wlog.Checkpoint(db.logger, db.wal, first, last, keep, mint); err != nil { db.metrics.checkpointCreationFail.Inc() - if _, ok := errors.Cause(err).(*wlog.CorruptionErr); ok { + var cerr *wlog.CorruptionErr + if errors.As(err, &cerr) { db.metrics.walCorruptionsTotal.Inc() } - return errors.Wrap(err, "create checkpoint") + return fmt.Errorf("create checkpoint: %w", err) } if err := db.wal.Truncate(last + 1); err != nil { // If truncating fails, we'll just try it again at the next checkpoint. @@ -780,11 +781,11 @@ func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v flo // equivalent validation code in the TSDB's headAppender. 
l = l.WithoutEmpty() if l.IsEmpty() { - return 0, errors.Wrap(tsdb.ErrInvalidSample, "empty labelset") + return 0, fmt.Errorf("empty labelset: %w", tsdb.ErrInvalidSample) } if lbl, dup := l.HasDuplicateLabelNames(); dup { - return 0, errors.Wrap(tsdb.ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, lbl)) + return 0, fmt.Errorf(`label name "%s" is not unique: %w`, lbl, tsdb.ErrInvalidSample) } var created bool @@ -841,7 +842,7 @@ func (a *appender) AppendExemplar(ref storage.SeriesRef, _ labels.Labels, e exem e.Labels = e.Labels.WithoutEmpty() if lbl, dup := e.Labels.HasDuplicateLabelNames(); dup { - return 0, errors.Wrap(tsdb.ErrInvalidExemplar, fmt.Sprintf(`label name "%s" is not unique`, lbl)) + return 0, fmt.Errorf(`label name "%s" is not unique: %w`, lbl, tsdb.ErrInvalidExemplar) } // Exemplar label length does not include chars involved in text rendering such as quotes @@ -903,11 +904,11 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int // equivalent validation code in the TSDB's headAppender. 
l = l.WithoutEmpty() if l.IsEmpty() { - return 0, errors.Wrap(tsdb.ErrInvalidSample, "empty labelset") + return 0, fmt.Errorf("empty labelset: %w", tsdb.ErrInvalidSample) } if lbl, dup := l.HasDuplicateLabelNames(); dup { - return 0, errors.Wrap(tsdb.ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, lbl)) + return 0, fmt.Errorf(`label name "%s" is not unique: %w`, lbl, tsdb.ErrInvalidSample) } var created bool From b60f9f801e7c4f13d6ed041106a7540ddf80edd6 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Wed, 8 Nov 2023 09:35:46 +0100 Subject: [PATCH 553/632] tsdb/chunkenc: use Go standard errors package Signed-off-by: Matthieu MOREL --- tsdb/chunkenc/chunk.go | 11 +++++------ tsdb/chunkenc/varbit.go | 7 +++---- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/tsdb/chunkenc/chunk.go b/tsdb/chunkenc/chunk.go index e7ff5b165..f4d11986c 100644 --- a/tsdb/chunkenc/chunk.go +++ b/tsdb/chunkenc/chunk.go @@ -14,11 +14,10 @@ package chunkenc import ( + "fmt" "math" "sync" - "github.com/pkg/errors" - "github.com/prometheus/prometheus/model/histogram" ) @@ -293,7 +292,7 @@ func (p *pool) Get(e Encoding, b []byte) (Chunk, error) { c.b.count = 0 return c, nil } - return nil, errors.Errorf("invalid chunk encoding %q", e) + return nil, fmt.Errorf("invalid chunk encoding %q", e) } func (p *pool) Put(c Chunk) error { @@ -332,7 +331,7 @@ func (p *pool) Put(c Chunk) error { sh.b.count = 0 p.floatHistogram.Put(c) default: - return errors.Errorf("invalid chunk encoding %q", c.Encoding()) + return fmt.Errorf("invalid chunk encoding %q", c.Encoding()) } return nil } @@ -349,7 +348,7 @@ func FromData(e Encoding, d []byte) (Chunk, error) { case EncFloatHistogram: return &FloatHistogramChunk{b: bstream{count: 0, stream: d}}, nil } - return nil, errors.Errorf("invalid chunk encoding %q", e) + return nil, fmt.Errorf("invalid chunk encoding %q", e) } // NewEmptyChunk returns an empty chunk for the given encoding. 
@@ -362,5 +361,5 @@ func NewEmptyChunk(e Encoding) (Chunk, error) { case EncFloatHistogram: return NewFloatHistogramChunk(), nil } - return nil, errors.Errorf("invalid chunk encoding %q", e) + return nil, fmt.Errorf("invalid chunk encoding %q", e) } diff --git a/tsdb/chunkenc/varbit.go b/tsdb/chunkenc/varbit.go index 449f9fbac..b43574dcb 100644 --- a/tsdb/chunkenc/varbit.go +++ b/tsdb/chunkenc/varbit.go @@ -14,9 +14,8 @@ package chunkenc import ( + "fmt" "math/bits" - - "github.com/pkg/errors" ) // putVarbitInt writes an int64 using varbit encoding with a bit bucketing @@ -109,7 +108,7 @@ func readVarbitInt(b *bstreamReader) (int64, error) { val = int64(bits) default: - return 0, errors.Errorf("invalid bit pattern %b", d) + return 0, fmt.Errorf("invalid bit pattern %b", d) } if sz != 0 { @@ -215,7 +214,7 @@ func readVarbitUint(b *bstreamReader) (uint64, error) { return 0, err } default: - return 0, errors.Errorf("invalid bit pattern %b", d) + return 0, fmt.Errorf("invalid bit pattern %b", d) } if sz != 0 { From ece8286305ce1bd9feca8632c3db28c7c38a5093 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Wed, 8 Nov 2023 10:02:59 +0100 Subject: [PATCH 554/632] tsdb/chunk: use Go standard errors package Signed-off-by: Matthieu MOREL --- tsdb/chunks/chunks.go | 38 ++++++++++++-------------- tsdb/chunks/head_chunks.go | 56 +++++++++++++++++++------------------- 2 files changed, 46 insertions(+), 48 deletions(-) diff --git a/tsdb/chunks/chunks.go b/tsdb/chunks/chunks.go index 2d5fba733..c4c4e3c93 100644 --- a/tsdb/chunks/chunks.go +++ b/tsdb/chunks/chunks.go @@ -24,8 +24,6 @@ import ( "path/filepath" "strconv" - "github.com/pkg/errors" - "github.com/prometheus/prometheus/tsdb/chunkenc" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" @@ -285,7 +283,7 @@ func checkCRC32(data, sum []byte) error { // This combination of shifts is the inverse of digest.Sum() in go/src/hash/crc32. 
want := uint32(sum[0])<<24 + uint32(sum[1])<<16 + uint32(sum[2])<<8 + uint32(sum[3]) if got != want { - return errors.Errorf("checksum mismatch expected:%x, actual:%x", want, got) + return fmt.Errorf("checksum mismatch expected:%x, actual:%x", want, got) } return nil } @@ -398,12 +396,12 @@ func (w *Writer) cut() error { func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, allocSize int64) (headerSize int, newFile *os.File, seq int, returnErr error) { p, seq, err := nextSequenceFile(dirFile.Name()) if err != nil { - return 0, nil, 0, errors.Wrap(err, "next sequence file") + return 0, nil, 0, fmt.Errorf("next sequence file: %w", err) } ptmp := p + ".tmp" f, err := os.OpenFile(ptmp, os.O_WRONLY|os.O_CREATE, 0o666) if err != nil { - return 0, nil, 0, errors.Wrap(err, "open temp file") + return 0, nil, 0, fmt.Errorf("open temp file: %w", err) } defer func() { if returnErr != nil { @@ -418,11 +416,11 @@ func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, all }() if allocSize > 0 { if err = fileutil.Preallocate(f, allocSize, true); err != nil { - return 0, nil, 0, errors.Wrap(err, "preallocate") + return 0, nil, 0, fmt.Errorf("preallocate: %w", err) } } if err = dirFile.Sync(); err != nil { - return 0, nil, 0, errors.Wrap(err, "sync directory") + return 0, nil, 0, fmt.Errorf("sync directory: %w", err) } // Write header metadata for new file. 
@@ -432,24 +430,24 @@ func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, all n, err := f.Write(metab) if err != nil { - return 0, nil, 0, errors.Wrap(err, "write header") + return 0, nil, 0, fmt.Errorf("write header: %w", err) } if err := f.Close(); err != nil { - return 0, nil, 0, errors.Wrap(err, "close temp file") + return 0, nil, 0, fmt.Errorf("close temp file: %w", err) } f = nil if err := fileutil.Rename(ptmp, p); err != nil { - return 0, nil, 0, errors.Wrap(err, "replace file") + return 0, nil, 0, fmt.Errorf("replace file: %w", err) } f, err = os.OpenFile(p, os.O_WRONLY, 0o666) if err != nil { - return 0, nil, 0, errors.Wrap(err, "open final file") + return 0, nil, 0, fmt.Errorf("open final file: %w", err) } // Skip header for further writes. if _, err := f.Seek(int64(n), 0); err != nil { - return 0, nil, 0, errors.Wrap(err, "seek in final file") + return 0, nil, 0, fmt.Errorf("seek in final file: %w", err) } return n, f, seq, nil } @@ -606,16 +604,16 @@ func newReader(bs []ByteSlice, cs []io.Closer, pool chunkenc.Pool) (*Reader, err cr := Reader{pool: pool, bs: bs, cs: cs} for i, b := range cr.bs { if b.Len() < SegmentHeaderSize { - return nil, errors.Wrapf(errInvalidSize, "invalid segment header in segment %d", i) + return nil, fmt.Errorf("invalid segment header in segment %d: %w", i, errInvalidSize) } // Verify magic number. if m := binary.BigEndian.Uint32(b.Range(0, MagicChunksSize)); m != MagicChunks { - return nil, errors.Errorf("invalid magic number %x", m) + return nil, fmt.Errorf("invalid magic number %x", m) } // Verify chunk format version. 
if v := int(b.Range(MagicChunksSize, MagicChunksSize+ChunksFormatVersionSize)[0]); v != chunksFormatV1 { - return nil, errors.Errorf("invalid chunk format version %d", v) + return nil, fmt.Errorf("invalid chunk format version %d", v) } cr.size += int64(b.Len()) } @@ -641,7 +639,7 @@ func NewDirReader(dir string, pool chunkenc.Pool) (*Reader, error) { f, err := fileutil.OpenMmapFile(fn) if err != nil { return nil, tsdb_errors.NewMulti( - errors.Wrap(err, "mmap files"), + fmt.Errorf("mmap files: %w", err), tsdb_errors.CloseAll(cs), ).Err() } @@ -673,20 +671,20 @@ func (s *Reader) Chunk(meta Meta) (chunkenc.Chunk, error) { sgmIndex, chkStart := BlockChunkRef(meta.Ref).Unpack() if sgmIndex >= len(s.bs) { - return nil, errors.Errorf("segment index %d out of range", sgmIndex) + return nil, fmt.Errorf("segment index %d out of range", sgmIndex) } sgmBytes := s.bs[sgmIndex] if chkStart+MaxChunkLengthFieldSize > sgmBytes.Len() { - return nil, errors.Errorf("segment doesn't include enough bytes to read the chunk size data field - required:%v, available:%v", chkStart+MaxChunkLengthFieldSize, sgmBytes.Len()) + return nil, fmt.Errorf("segment doesn't include enough bytes to read the chunk size data field - required:%v, available:%v", chkStart+MaxChunkLengthFieldSize, sgmBytes.Len()) } // With the minimum chunk length this should never cause us reading // over the end of the slice. 
c := sgmBytes.Range(chkStart, chkStart+MaxChunkLengthFieldSize) chkDataLen, n := binary.Uvarint(c) if n <= 0 { - return nil, errors.Errorf("reading chunk length failed with %d", n) + return nil, fmt.Errorf("reading chunk length failed with %d", n) } chkEncStart := chkStart + n @@ -695,7 +693,7 @@ func (s *Reader) Chunk(meta Meta) (chunkenc.Chunk, error) { chkDataEnd := chkEnd - crc32.Size if chkEnd > sgmBytes.Len() { - return nil, errors.Errorf("segment doesn't include enough bytes to read the chunk - required:%v, available:%v", chkEnd, sgmBytes.Len()) + return nil, fmt.Errorf("segment doesn't include enough bytes to read the chunk - required:%v, available:%v", chkEnd, sgmBytes.Len()) } sum := sgmBytes.Range(chkDataEnd, chkEnd) diff --git a/tsdb/chunks/head_chunks.go b/tsdb/chunks/head_chunks.go index d73eb36f8..b495b6182 100644 --- a/tsdb/chunks/head_chunks.go +++ b/tsdb/chunks/head_chunks.go @@ -17,6 +17,8 @@ import ( "bufio" "bytes" "encoding/binary" + "errors" + "fmt" "hash" "io" "os" @@ -25,7 +27,6 @@ import ( "sync" "github.com/dennwc/varint" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" "golang.org/x/exp/slices" @@ -107,7 +108,7 @@ type CorruptionErr struct { } func (e *CorruptionErr) Error() string { - return errors.Wrapf(e.Err, "corruption in head chunk file %s", segmentFile(e.Dir, e.FileIndex)).Error() + return fmt.Errorf("corruption in head chunk file %s: %w", segmentFile(e.Dir, e.FileIndex), e.Err).Error() } // chunkPos keeps track of the position in the head chunk files. @@ -240,10 +241,10 @@ type mmappedChunkFile struct { func NewChunkDiskMapper(reg prometheus.Registerer, dir string, pool chunkenc.Pool, writeBufferSize, writeQueueSize int) (*ChunkDiskMapper, error) { // Validate write buffer size. 
if writeBufferSize < MinWriteBufferSize || writeBufferSize > MaxWriteBufferSize { - return nil, errors.Errorf("ChunkDiskMapper write buffer size should be between %d and %d (actual: %d)", MinWriteBufferSize, MaxWriteBufferSize, writeBufferSize) + return nil, fmt.Errorf("ChunkDiskMapper write buffer size should be between %d and %d (actual: %d)", MinWriteBufferSize, MaxWriteBufferSize, writeBufferSize) } if writeBufferSize%1024 != 0 { - return nil, errors.Errorf("ChunkDiskMapper write buffer size should be a multiple of 1024 (actual: %d)", writeBufferSize) + return nil, fmt.Errorf("ChunkDiskMapper write buffer size should be a multiple of 1024 (actual: %d)", writeBufferSize) } if err := os.MkdirAll(dir, 0o777); err != nil { @@ -320,7 +321,7 @@ func (cdm *ChunkDiskMapper) openMMapFiles() (returnErr error) { for seq, fn := range files { f, err := fileutil.OpenMmapFile(fn) if err != nil { - return errors.Wrapf(err, "mmap files, file: %s", fn) + return fmt.Errorf("mmap files, file: %s: %w", fn, err) } cdm.closers[seq] = f cdm.mmappedChunkFiles[seq] = &mmappedChunkFile{byteSlice: realByteSlice(f.Bytes())} @@ -335,23 +336,23 @@ func (cdm *ChunkDiskMapper) openMMapFiles() (returnErr error) { lastSeq := chkFileIndices[0] for _, seq := range chkFileIndices[1:] { if seq != lastSeq+1 { - return errors.Errorf("found unsequential head chunk files %s (index: %d) and %s (index: %d)", files[lastSeq], lastSeq, files[seq], seq) + return fmt.Errorf("found unsequential head chunk files %s (index: %d) and %s (index: %d)", files[lastSeq], lastSeq, files[seq], seq) } lastSeq = seq } for i, b := range cdm.mmappedChunkFiles { if b.byteSlice.Len() < HeadChunkFileHeaderSize { - return errors.Wrapf(errInvalidSize, "%s: invalid head chunk file header", files[i]) + return fmt.Errorf("%s: invalid head chunk file header: %w", files[i], errInvalidSize) } // Verify magic number. 
if m := binary.BigEndian.Uint32(b.byteSlice.Range(0, MagicChunksSize)); m != MagicHeadChunks { - return errors.Errorf("%s: invalid magic number %x", files[i], m) + return fmt.Errorf("%s: invalid magic number %x", files[i], m) } // Verify chunk format version. if v := int(b.byteSlice.Range(MagicChunksSize, MagicChunksSize+ChunksFormatVersionSize)[0]); v != chunksFormatV1 { - return errors.Errorf("%s: invalid chunk format version %d", files[i], v) + return fmt.Errorf("%s: invalid chunk format version %d", files[i], v) } } @@ -394,16 +395,16 @@ func repairLastChunkFile(files map[int]string) (_ map[int]string, returnErr erro f, err := os.Open(files[lastFile]) if err != nil { - return files, errors.Wrap(err, "open file during last head chunk file repair") + return files, fmt.Errorf("open file during last head chunk file repair: %w", err) } buf := make([]byte, MagicChunksSize) size, err := f.Read(buf) if err != nil && err != io.EOF { - return files, errors.Wrap(err, "failed to read magic number during last head chunk file repair") + return files, fmt.Errorf("failed to read magic number during last head chunk file repair: %w", err) } if err := f.Close(); err != nil { - return files, errors.Wrap(err, "close file during last head chunk file repair") + return files, fmt.Errorf("close file during last head chunk file repair: %w", err) } // We either don't have enough bytes for the magic number or the magic number is 0. @@ -413,7 +414,7 @@ func repairLastChunkFile(files map[int]string) (_ map[int]string, returnErr erro if size < MagicChunksSize || binary.BigEndian.Uint32(buf) == 0 { // Corrupt file, hence remove it. 
if err := os.RemoveAll(files[lastFile]); err != nil { - return files, errors.Wrap(err, "delete corrupted, empty head chunk file during last file repair") + return files, fmt.Errorf("delete corrupted, empty head chunk file during last file repair: %w", err) } delete(files, lastFile) } @@ -559,7 +560,7 @@ func (cdm *ChunkDiskMapper) cutAndExpectRef(chkRef ChunkDiskMapperRef) (err erro } if expSeq, expOffset := chkRef.Unpack(); seq != expSeq || offset != expOffset { - return errors.Errorf("expected newly cut file to have sequence:offset %d:%d, got %d:%d", expSeq, expOffset, seq, offset) + return fmt.Errorf("expected newly cut file to have sequence:offset %d:%d, got %d:%d", expSeq, expOffset, seq, offset) } return nil @@ -701,13 +702,13 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error return nil, &CorruptionErr{ Dir: cdm.dir.Name(), FileIndex: -1, - Err: errors.Errorf("head chunk file index %d more than current open file", sgmIndex), + Err: fmt.Errorf("head chunk file index %d more than current open file", sgmIndex), } } return nil, &CorruptionErr{ Dir: cdm.dir.Name(), FileIndex: sgmIndex, - Err: errors.Errorf("head chunk file index %d does not exist on disk", sgmIndex), + Err: fmt.Errorf("head chunk file index %d does not exist on disk", sgmIndex), } } @@ -715,7 +716,7 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error return nil, &CorruptionErr{ Dir: cdm.dir.Name(), FileIndex: sgmIndex, - Err: errors.Errorf("head chunk file doesn't include enough bytes to read the chunk size data field - required:%v, available:%v", chkStart+MaxChunkLengthFieldSize, mmapFile.byteSlice.Len()), + Err: fmt.Errorf("head chunk file doesn't include enough bytes to read the chunk size data field - required:%v, available:%v", chkStart+MaxChunkLengthFieldSize, mmapFile.byteSlice.Len()), } } @@ -734,7 +735,7 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error return nil, &CorruptionErr{ Dir: 
cdm.dir.Name(), FileIndex: sgmIndex, - Err: errors.Errorf("reading chunk length failed with %d", n), + Err: fmt.Errorf("reading chunk length failed with %d", n), } } @@ -744,7 +745,7 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error return nil, &CorruptionErr{ Dir: cdm.dir.Name(), FileIndex: sgmIndex, - Err: errors.Errorf("head chunk file doesn't include enough bytes to read the chunk - required:%v, available:%v", chkDataEnd, mmapFile.byteSlice.Len()), + Err: fmt.Errorf("head chunk file doesn't include enough bytes to read the chunk - required:%v, available:%v", chkDataEnd, mmapFile.byteSlice.Len()), } } @@ -761,7 +762,7 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error return nil, &CorruptionErr{ Dir: cdm.dir.Name(), FileIndex: sgmIndex, - Err: errors.Errorf("checksum mismatch expected:%x, actual:%x", sum, act), + Err: fmt.Errorf("checksum mismatch expected:%x, actual:%x", sum, act), } } @@ -829,7 +830,7 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu return &CorruptionErr{ Dir: cdm.dir.Name(), FileIndex: segID, - Err: errors.Errorf("head chunk file has some unread data, but doesn't include enough bytes to read the chunk header"+ + Err: fmt.Errorf("head chunk file has some unread data, but doesn't include enough bytes to read the chunk header"+ " - required:%v, available:%v, file:%d", idx+MaxHeadChunkMetaSize, fileEnd, segID), } } @@ -866,7 +867,7 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu return &CorruptionErr{ Dir: cdm.dir.Name(), FileIndex: segID, - Err: errors.Errorf("head chunk file doesn't include enough bytes to read the chunk header - required:%v, available:%v, file:%d", idx+CRCSize, fileEnd, segID), + Err: fmt.Errorf("head chunk file doesn't include enough bytes to read the chunk header - required:%v, available:%v, file:%d", idx+CRCSize, fileEnd, segID), } } @@ -879,7 +880,7 @@ func (cdm *ChunkDiskMapper) 
IterateAllChunks(f func(seriesRef HeadSeriesRef, chu return &CorruptionErr{ Dir: cdm.dir.Name(), FileIndex: segID, - Err: errors.Errorf("checksum mismatch expected:%x, actual:%x", sum, act), + Err: fmt.Errorf("checksum mismatch expected:%x, actual:%x", sum, act), } } idx += CRCSize @@ -905,7 +906,7 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu return &CorruptionErr{ Dir: cdm.dir.Name(), FileIndex: segID, - Err: errors.Errorf("head chunk file doesn't include enough bytes to read the last chunk data - required:%v, available:%v, file:%d", idx, fileEnd, segID), + Err: fmt.Errorf("head chunk file doesn't include enough bytes to read the last chunk data - required:%v, available:%v, file:%d", idx, fileEnd, segID), } } } @@ -998,10 +999,9 @@ func (cdm *ChunkDiskMapper) deleteFiles(removedFiles []int) ([]int, error) { // DeleteCorrupted deletes all the head chunk files after the one which had the corruption // (including the corrupt file). func (cdm *ChunkDiskMapper) DeleteCorrupted(originalErr error) error { - err := errors.Cause(originalErr) // So that we can pick up errors even if wrapped. - cerr, ok := err.(*CorruptionErr) - if !ok { - return errors.Wrap(originalErr, "cannot handle error") + var cerr *CorruptionErr + if !errors.As(originalErr, &cerr) { + return fmt.Errorf("cannot handle error: %w", originalErr) } // Delete all the head chunk files following the corrupt head chunk file. From ae9221e152c01daba1de8ddea4c01f9f6e86451c Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Wed, 8 Nov 2023 13:08:33 +0100 Subject: [PATCH 555/632] tsdb/index.Symbols: Drop context argument from Lookup method (#13058) Drop context argument from tsdb/index.Symbols.Lookup since lookup should be fast and the context checking is a performance hit. 
Signed-off-by: Arve Knudsen --- tsdb/index/index.go | 9 +++------ tsdb/index/index_test.go | 5 ++--- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 0eb0d1434..893167c25 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -923,7 +923,7 @@ func (w *Writer) writePostingsToTmpFiles() error { // Symbol numbers are in order, so the strings will also be in order. slices.Sort(values) for _, v := range values { - value, err := w.symbols.Lookup(w.ctx, v) + value, err := w.symbols.Lookup(v) if err != nil { return err } @@ -1295,7 +1295,7 @@ func NewSymbols(bs ByteSlice, version, off int) (*Symbols, error) { return s, nil } -func (s Symbols) Lookup(ctx context.Context, o uint32) (string, error) { +func (s Symbols) Lookup(o uint32) (string, error) { d := encoding.Decbuf{ B: s.bs.Range(0, s.bs.Len()), } @@ -1307,9 +1307,6 @@ func (s Symbols) Lookup(ctx context.Context, o uint32) (string, error) { d.Skip(s.offsets[int(o/symbolFactor)]) // Walk until we find the one we want. for i := o - (o / symbolFactor * symbolFactor); i > 0; i-- { - if ctx.Err() != nil { - return "", ctx.Err() - } d.UvarintBytes() } } else { @@ -1441,7 +1438,7 @@ func (r *Reader) lookupSymbol(ctx context.Context, o uint32) (string, error) { if s, ok := r.nameSymbols[o]; ok { return s, nil } - return r.symbols.Lookup(ctx, o) + return r.symbols.Lookup(o) } // Symbols returns an iterator over the symbols that exist within the index. diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go index abc147797..7a6683da2 100644 --- a/tsdb/index/index_test.go +++ b/tsdb/index/index_test.go @@ -519,7 +519,6 @@ func TestNewFileReaderErrorNoOpenFiles(t *testing.T) { } func TestSymbols(t *testing.T) { - ctx := context.Background() buf := encoding.Encbuf{} // Add prefix to the buffer to simulate symbols as part of larger buffer. 
@@ -542,11 +541,11 @@ func TestSymbols(t *testing.T) { require.Equal(t, 32, s.Size()) for i := 99; i >= 0; i-- { - s, err := s.Lookup(ctx, uint32(i)) + s, err := s.Lookup(uint32(i)) require.NoError(t, err) require.Equal(t, string(rune(i)), s) } - _, err = s.Lookup(ctx, 100) + _, err = s.Lookup(100) require.Error(t, err) for i := 99; i >= 0; i-- { From ab2a7bb74fa32f9199e3b9d4a19c000c304a37e9 Mon Sep 17 00:00:00 2001 From: Ziqi Zhao Date: Wed, 8 Nov 2023 21:43:05 +0800 Subject: [PATCH 556/632] add generic shrink function (#13001) Add `ReduceResolution` method to `Histogram` and `FloatHistogram` This takes the original `mergeToSchema` function and turns it into a more generic `reduceResolution` function, which is the building block for the new methods. The methods will help with addressing #12864. --------- Signed-off-by: Ziqi Zhao --- model/histogram/float_histogram.go | 98 ++++++------------------------ model/histogram/generic.go | 87 ++++++++++++++++++++++++++ model/histogram/generic_test.go | 70 +++++++++++++++++++++ model/histogram/histogram.go | 12 ++++ 4 files changed, 186 insertions(+), 81 deletions(-) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 22d33f5a4..212b02880 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -94,8 +94,8 @@ func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram { Sum: h.Sum, } - c.PositiveSpans, c.PositiveBuckets = mergeToSchema(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema) - c.NegativeSpans, c.NegativeBuckets = mergeToSchema(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema) + c.PositiveSpans, c.PositiveBuckets = reduceResolution(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, false) + c.NegativeSpans, c.NegativeBuckets = reduceResolution(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, false) return &c } @@ -268,17 +268,12 @@ func (h *FloatHistogram) Add(other *FloatHistogram) 
*FloatHistogram { h.Count += other.Count h.Sum += other.Sum - otherPositiveSpans := other.PositiveSpans - otherPositiveBuckets := other.PositiveBuckets - otherNegativeSpans := other.NegativeSpans - otherNegativeBuckets := other.NegativeBuckets if other.Schema != h.Schema { - otherPositiveSpans, otherPositiveBuckets = mergeToSchema(other.PositiveSpans, other.PositiveBuckets, other.Schema, h.Schema) - otherNegativeSpans, otherNegativeBuckets = mergeToSchema(other.NegativeSpans, other.NegativeBuckets, other.Schema, h.Schema) + other = other.ReduceResolution(h.Schema) } - h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) - h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.PositiveSpans, h.PositiveBuckets, other.PositiveSpans, other.PositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.NegativeSpans, h.NegativeBuckets, other.NegativeSpans, other.NegativeBuckets) return h } @@ -289,17 +284,12 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { h.Count -= other.Count h.Sum -= other.Sum - otherPositiveSpans := other.PositiveSpans - otherPositiveBuckets := other.PositiveBuckets - otherNegativeSpans := other.NegativeSpans - otherNegativeBuckets := other.NegativeBuckets if other.Schema != h.Schema { - otherPositiveSpans, otherPositiveBuckets = mergeToSchema(other.PositiveSpans, other.PositiveBuckets, other.Schema, h.Schema) - otherNegativeSpans, otherNegativeBuckets = mergeToSchema(other.NegativeSpans, other.NegativeBuckets, other.Schema, h.Schema) + other = other.ReduceResolution(h.Schema) } - h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.PositiveSpans, 
h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) - h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.PositiveSpans, h.PositiveBuckets, other.PositiveSpans, other.PositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.NegativeSpans, h.NegativeBuckets, other.NegativeSpans, other.NegativeBuckets) return h } @@ -975,69 +965,6 @@ func targetIdx(idx, originSchema, targetSchema int32) int32 { return ((idx - 1) >> (originSchema - targetSchema)) + 1 } -// mergeToSchema is used to merge a FloatHistogram's Spans and Buckets (no matter if -// positive or negative) from the original schema to the target schema. -// The target schema must be smaller than the original schema. -func mergeToSchema(originSpans []Span, originBuckets []float64, originSchema, targetSchema int32) ([]Span, []float64) { - var ( - targetSpans []Span // The spans in the target schema. - targetBuckets []float64 // The buckets in the target schema. - bucketIdx int32 // The index of bucket in the origin schema. - lastTargetBucketIdx int32 // The index of the last added target bucket. - origBucketIdx int // The position of a bucket in originBuckets slice. - ) - - for _, span := range originSpans { - // Determine the index of the first bucket in this span. - bucketIdx += span.Offset - for j := 0; j < int(span.Length); j++ { - // Determine the index of the bucket in the target schema from the index in the original schema. - targetBucketIdx := targetIdx(bucketIdx, originSchema, targetSchema) - - switch { - case len(targetSpans) == 0: - // This is the first span in the targetSpans. 
- span := Span{ - Offset: targetBucketIdx, - Length: 1, - } - targetSpans = append(targetSpans, span) - targetBuckets = append(targetBuckets, originBuckets[0]) - lastTargetBucketIdx = targetBucketIdx - - case lastTargetBucketIdx == targetBucketIdx: - // The current bucket has to be merged into the same target bucket as the previous bucket. - targetBuckets[len(targetBuckets)-1] += originBuckets[origBucketIdx] - - case (lastTargetBucketIdx + 1) == targetBucketIdx: - // The current bucket has to go into a new target bucket, - // and that bucket is next to the previous target bucket, - // so we add it to the current target span. - targetSpans[len(targetSpans)-1].Length++ - targetBuckets = append(targetBuckets, originBuckets[origBucketIdx]) - lastTargetBucketIdx++ - - case (lastTargetBucketIdx + 1) < targetBucketIdx: - // The current bucket has to go into a new target bucket, - // and that bucket is separated by a gap from the previous target bucket, - // so we need to add a new target span. - span := Span{ - Offset: targetBucketIdx - lastTargetBucketIdx - 1, - Length: 1, - } - targetSpans = append(targetSpans, span) - targetBuckets = append(targetBuckets, originBuckets[origBucketIdx]) - lastTargetBucketIdx = targetBucketIdx - } - - bucketIdx++ - origBucketIdx++ - } - } - - return targetSpans, targetBuckets -} - // addBuckets adds the buckets described by spansB/bucketsB to the buckets described by spansA/bucketsA, // creating missing buckets in spansA/bucketsA as needed. // It returns the resulting spans/buckets (which must be used instead of the original spansA/bucketsA, @@ -1179,3 +1106,12 @@ func floatBucketsMatch(b1, b2 []float64) bool { } return true } + +// ReduceResolution reduces the float histogram's spans, buckets into target schema. +// The target schema must be smaller than the current float histogram's schema. 
+func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram { + h.PositiveSpans, h.PositiveBuckets = reduceResolution(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, false) + h.NegativeSpans, h.NegativeBuckets = reduceResolution(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, false) + + return h +} diff --git a/model/histogram/generic.go b/model/histogram/generic.go index 7e4eb1ecb..d42bb2415 100644 --- a/model/histogram/generic.go +++ b/model/histogram/generic.go @@ -600,3 +600,90 @@ var exponentialBounds = [][]float64{ 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698, }, } + +// reduceResolution reduces the input spans, buckets in origin schema to the spans, buckets in target schema. +// The target schema must be smaller than the original schema. +// Set deltaBuckets to true if the provided buckets are +// deltas. Set it to false if the buckets contain absolute counts. +func reduceResolution[IBC InternalBucketCount](originSpans []Span, originBuckets []IBC, originSchema, targetSchema int32, deltaBuckets bool) ([]Span, []IBC) { + var ( + targetSpans []Span // The spans in the target schema. + targetBuckets []IBC // The bucket counts in the target schema. + bucketIdx int32 // The index of bucket in the origin schema. + bucketCountIdx int // The position of a bucket in origin bucket count slice `originBuckets`. + targetBucketIdx int32 // The index of bucket in the target schema. + lastBucketCount IBC // The last visited bucket's count in the origin schema. + lastTargetBucketIdx int32 // The index of the last added target bucket. + lastTargetBucketCount IBC + ) + + for _, span := range originSpans { + // Determine the index of the first bucket in this span. + bucketIdx += span.Offset + for j := 0; j < int(span.Length); j++ { + // Determine the index of the bucket in the target schema from the index in the original schema. 
+ targetBucketIdx = targetIdx(bucketIdx, originSchema, targetSchema) + + switch { + case len(targetSpans) == 0: + // This is the first span in the targetSpans. + span := Span{ + Offset: targetBucketIdx, + Length: 1, + } + targetSpans = append(targetSpans, span) + targetBuckets = append(targetBuckets, originBuckets[bucketCountIdx]) + lastTargetBucketIdx = targetBucketIdx + lastBucketCount = originBuckets[bucketCountIdx] + lastTargetBucketCount = originBuckets[bucketCountIdx] + + case lastTargetBucketIdx == targetBucketIdx: + // The current bucket has to be merged into the same target bucket as the previous bucket. + if deltaBuckets { + lastBucketCount += originBuckets[bucketCountIdx] + targetBuckets[len(targetBuckets)-1] += lastBucketCount + lastTargetBucketCount += lastBucketCount + } else { + targetBuckets[len(targetBuckets)-1] += originBuckets[bucketCountIdx] + } + + case (lastTargetBucketIdx + 1) == targetBucketIdx: + // The current bucket has to go into a new target bucket, + // and that bucket is next to the previous target bucket, + // so we add it to the current target span. + targetSpans[len(targetSpans)-1].Length++ + lastTargetBucketIdx++ + if deltaBuckets { + lastBucketCount += originBuckets[bucketCountIdx] + targetBuckets = append(targetBuckets, lastBucketCount-lastTargetBucketCount) + lastTargetBucketCount = lastBucketCount + } else { + targetBuckets = append(targetBuckets, originBuckets[bucketCountIdx]) + } + + case (lastTargetBucketIdx + 1) < targetBucketIdx: + // The current bucket has to go into a new target bucket, + // and that bucket is separated by a gap from the previous target bucket, + // so we need to add a new target span. 
+ span := Span{ + Offset: targetBucketIdx - lastTargetBucketIdx - 1, + Length: 1, + } + targetSpans = append(targetSpans, span) + lastTargetBucketIdx = targetBucketIdx + if deltaBuckets { + lastBucketCount += originBuckets[bucketCountIdx] + targetBuckets = append(targetBuckets, lastBucketCount-lastTargetBucketCount) + lastTargetBucketCount = lastBucketCount + } else { + targetBuckets = append(targetBuckets, originBuckets[bucketCountIdx]) + } + } + + bucketIdx++ + bucketCountIdx++ + } + } + + return targetSpans, targetBuckets +} diff --git a/model/histogram/generic_test.go b/model/histogram/generic_test.go index 55015c047..d24910d21 100644 --- a/model/histogram/generic_test.go +++ b/model/histogram/generic_test.go @@ -110,3 +110,73 @@ func TestGetBound(t *testing.T) { } } } + +func TestReduceResolutionHistogram(t *testing.T) { + cases := []struct { + spans []Span + buckets []int64 + schema int32 + targetSchema int32 + expectedSpans []Span + expectedBuckets []int64 + }{ + { + spans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 3, Length: 2}, + }, + buckets: []int64{1, 2, -2, 1, -1, 0}, + schema: 0, + targetSchema: -1, + expectedSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 1}, + }, + expectedBuckets: []int64{1, 3, -2, 0}, + // schema 0, base 2 { (0.5, 1]:1 (1,2]:3, (2,4]:1, (4,8]:2, (8,16]:0, (16,32]:0, (32,64]:0, (64,128]:1, (128,256]:1}", + // schema 1, base 4 { (0.25, 1):1 (1,4]:4, (4,16]:2, (16,64]:0, (64,256]:2} + }, + } + + for _, tc := range cases { + spans, buckets := reduceResolution(tc.spans, tc.buckets, tc.schema, tc.targetSchema, true) + require.Equal(t, tc.expectedSpans, spans) + require.Equal(t, tc.expectedBuckets, buckets) + } +} + +func TestReduceResolutionFloatHistogram(t *testing.T) { + cases := []struct { + spans []Span + buckets []float64 + schema int32 + targetSchema int32 + expectedSpans []Span + expectedBuckets []float64 + }{ + { + spans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + 
{Offset: 3, Length: 2}, + }, + buckets: []float64{1, 3, 1, 2, 1, 1}, + schema: 0, + targetSchema: -1, + expectedSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 1}, + }, + expectedBuckets: []float64{1, 4, 2, 2}, + // schema 0, base 2 { (0.5, 1]:1 (1,2]:3, (2,4]:1, (4,8]:2, (8,16]:0, (16,32]:0, (32,64]:0, (64,128]:1, (128,256]:1}", + // schema 1, base 4 { (0.25, 1):1 (1,4]:4, (4,16]:2, (16,64]:0, (64,256]:2} + }, + } + + for _, tc := range cases { + spans, buckets := reduceResolution(tc.spans, tc.buckets, tc.schema, tc.targetSchema, false) + require.Equal(t, tc.expectedSpans, spans) + require.Equal(t, tc.expectedBuckets, buckets) + } +} diff --git a/model/histogram/histogram.go b/model/histogram/histogram.go index 30c23e5e7..4699bd3cb 100644 --- a/model/histogram/histogram.go +++ b/model/histogram/histogram.go @@ -493,3 +493,15 @@ func (c *cumulativeBucketIterator) At() Bucket[uint64] { Index: c.currIdx - 1, } } + +// ReduceResolution reduces the histogram's spans, buckets into target schema. +// The target schema must be smaller than the current histogram's schema. +func (h *Histogram) ReduceResolution(targetSchema int32) *Histogram { + h.PositiveSpans, h.PositiveBuckets = reduceResolution( + h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, true, + ) + h.NegativeSpans, h.NegativeBuckets = reduceResolution( + h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, true, + ) + return h +} From 443867f1aa07ca76c95ed5e7003edc18e73971c9 Mon Sep 17 00:00:00 2001 From: songjiayang Date: Thu, 9 Nov 2023 00:42:50 +0800 Subject: [PATCH 557/632] symbolCacheEntry field type alignment, thus saving 8 bytes. 
Signed-off-by: songjiayang --- tsdb/index/index.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 893167c25..068090603 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -107,8 +107,8 @@ func newCRC32() hash.Hash32 { type symbolCacheEntry struct { index uint32 - lastValue string lastValueIndex uint32 + lastValue string } // Writer implements the IndexWriter interface for the standard @@ -457,8 +457,8 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... } w.symbolCache[l.Name] = symbolCacheEntry{ index: nameIndex, - lastValue: l.Value, lastValueIndex: valueIndex, + lastValue: l.Value, } } w.buf2.PutUvarint32(valueIndex) From fb48a351f0b274344eb92b1994d6302e4488f83d Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Wed, 8 Nov 2023 21:45:14 +0100 Subject: [PATCH 558/632] tsdb/wlog: use Go standard errors package Signed-off-by: Matthieu MOREL --- tsdb/wlog/checkpoint.go | 46 ++++++++++++++++++------------------ tsdb/wlog/checkpoint_test.go | 3 +-- tsdb/wlog/live_reader.go | 10 ++++---- tsdb/wlog/reader.go | 17 ++++++------- tsdb/wlog/wlog.go | 40 +++++++++++++++---------------- 5 files changed, 57 insertions(+), 59 deletions(-) diff --git a/tsdb/wlog/checkpoint.go b/tsdb/wlog/checkpoint.go index d64599c27..3d5b56da2 100644 --- a/tsdb/wlog/checkpoint.go +++ b/tsdb/wlog/checkpoint.go @@ -15,6 +15,7 @@ package wlog import ( + "errors" "fmt" "io" "math" @@ -25,7 +26,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" "golang.org/x/exp/slices" "github.com/prometheus/prometheus/tsdb/chunks" @@ -102,8 +102,8 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head { var sgmRange []SegmentRange dir, idx, err := LastCheckpoint(w.Dir()) - if err != nil && err != record.ErrNotFound { - return nil, errors.Wrap(err, "find last checkpoint") + if err != nil && !errors.Is(err, record.ErrNotFound) { + 
return nil, fmt.Errorf("find last checkpoint: %w", err) } last := idx + 1 if err == nil { @@ -119,7 +119,7 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head sgmRange = append(sgmRange, SegmentRange{Dir: w.Dir(), First: from, Last: to}) sgmReader, err = NewSegmentsRangeReader(sgmRange...) if err != nil { - return nil, errors.Wrap(err, "create segment reader") + return nil, fmt.Errorf("create segment reader: %w", err) } defer sgmReader.Close() } @@ -128,15 +128,15 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head cpdirtmp := cpdir + ".tmp" if err := os.RemoveAll(cpdirtmp); err != nil { - return nil, errors.Wrap(err, "remove previous temporary checkpoint dir") + return nil, fmt.Errorf("remove previous temporary checkpoint dir: %w", err) } if err := os.MkdirAll(cpdirtmp, 0o777); err != nil { - return nil, errors.Wrap(err, "create checkpoint dir") + return nil, fmt.Errorf("create checkpoint dir: %w", err) } cp, err := New(nil, nil, cpdirtmp, w.CompressionType()) if err != nil { - return nil, errors.Wrap(err, "open checkpoint") + return nil, fmt.Errorf("open checkpoint: %w", err) } // Ensures that an early return caused by an error doesn't leave any tmp files. @@ -174,7 +174,7 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head case record.Series: series, err = dec.Series(rec, series) if err != nil { - return nil, errors.Wrap(err, "decode series") + return nil, fmt.Errorf("decode series: %w", err) } // Drop irrelevant series in place. repl := series[:0] @@ -192,7 +192,7 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head case record.Samples: samples, err = dec.Samples(rec, samples) if err != nil { - return nil, errors.Wrap(err, "decode samples") + return nil, fmt.Errorf("decode samples: %w", err) } // Drop irrelevant samples in place. 
repl := samples[:0] @@ -210,7 +210,7 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head case record.HistogramSamples: histogramSamples, err = dec.HistogramSamples(rec, histogramSamples) if err != nil { - return nil, errors.Wrap(err, "decode histogram samples") + return nil, fmt.Errorf("decode histogram samples: %w", err) } // Drop irrelevant histogramSamples in place. repl := histogramSamples[:0] @@ -228,7 +228,7 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head case record.Tombstones: tstones, err = dec.Tombstones(rec, tstones) if err != nil { - return nil, errors.Wrap(err, "decode deletes") + return nil, fmt.Errorf("decode deletes: %w", err) } // Drop irrelevant tombstones in place. repl := tstones[:0] @@ -249,7 +249,7 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head case record.Exemplars: exemplars, err = dec.Exemplars(rec, exemplars) if err != nil { - return nil, errors.Wrap(err, "decode exemplars") + return nil, fmt.Errorf("decode exemplars: %w", err) } // Drop irrelevant exemplars in place. repl := exemplars[:0] @@ -266,7 +266,7 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head case record.Metadata: metadata, err := dec.Metadata(rec, metadata) if err != nil { - return nil, errors.Wrap(err, "decode metadata") + return nil, fmt.Errorf("decode metadata: %w", err) } // Only keep reference to the latest found metadata for each refID. repl := 0 @@ -292,7 +292,7 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head // Flush records in 1 MB increments. 
if len(buf) > 1*1024*1024 { if err := cp.Log(recs...); err != nil { - return nil, errors.Wrap(err, "flush records") + return nil, fmt.Errorf("flush records: %w", err) } buf, recs = buf[:0], recs[:0] } @@ -300,12 +300,12 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head // If we hit any corruption during checkpointing, repairing is not an option. // The head won't know which series records are lost. if r.Err() != nil { - return nil, errors.Wrap(r.Err(), "read segments") + return nil, fmt.Errorf("read segments: %w", r.Err()) } // Flush remaining records. if err := cp.Log(recs...); err != nil { - return nil, errors.Wrap(err, "flush records") + return nil, fmt.Errorf("flush records: %w", err) } // Flush latest metadata records for each series. @@ -315,29 +315,29 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head latestMetadata = append(latestMetadata, m) } if err := cp.Log(enc.Metadata(latestMetadata, buf[:0])); err != nil { - return nil, errors.Wrap(err, "flush metadata records") + return nil, fmt.Errorf("flush metadata records: %w", err) } } if err := cp.Close(); err != nil { - return nil, errors.Wrap(err, "close checkpoint") + return nil, fmt.Errorf("close checkpoint: %w", err) } // Sync temporary directory before rename. 
df, err := fileutil.OpenDir(cpdirtmp) if err != nil { - return nil, errors.Wrap(err, "open temporary checkpoint directory") + return nil, fmt.Errorf("open temporary checkpoint directory: %w", err) } if err := df.Sync(); err != nil { df.Close() - return nil, errors.Wrap(err, "sync temporary checkpoint directory") + return nil, fmt.Errorf("sync temporary checkpoint directory: %w", err) } if err = df.Close(); err != nil { - return nil, errors.Wrap(err, "close temporary checkpoint directory") + return nil, fmt.Errorf("close temporary checkpoint directory: %w", err) } if err := fileutil.Replace(cpdirtmp, cpdir); err != nil { - return nil, errors.Wrap(err, "rename checkpoint directory") + return nil, fmt.Errorf("rename checkpoint directory: %w", err) } return stats, nil @@ -364,7 +364,7 @@ func listCheckpoints(dir string) (refs []checkpointRef, err error) { continue } if !fi.IsDir() { - return nil, errors.Errorf("checkpoint %s is not a directory", fi.Name()) + return nil, fmt.Errorf("checkpoint %s is not a directory", fi.Name()) } idx, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):]) if err != nil { diff --git a/tsdb/wlog/checkpoint_test.go b/tsdb/wlog/checkpoint_test.go index 704a65cc1..381e09186 100644 --- a/tsdb/wlog/checkpoint_test.go +++ b/tsdb/wlog/checkpoint_test.go @@ -23,7 +23,6 @@ import ( "testing" "github.com/go-kit/log" - "github.com/pkg/errors" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/histogram" @@ -325,7 +324,7 @@ func TestCheckpointNoTmpFolderAfterError(t *testing.T) { // Walk the wlog dir to make sure there are no tmp folder left behind after the error. 
err = filepath.Walk(w.Dir(), func(path string, info os.FileInfo, err error) error { if err != nil { - return errors.Wrapf(err, "access err %q: %v", path, err) + return fmt.Errorf("access err %q: %w", path, err) } if info.IsDir() && strings.HasSuffix(info.Name(), ".tmp") { return fmt.Errorf("wlog dir contains temporary folder:%s", info.Name()) diff --git a/tsdb/wlog/live_reader.go b/tsdb/wlog/live_reader.go index a440eedf7..905bbf00d 100644 --- a/tsdb/wlog/live_reader.go +++ b/tsdb/wlog/live_reader.go @@ -16,6 +16,7 @@ package wlog import ( "encoding/binary" + "errors" "fmt" "hash/crc32" "io" @@ -24,7 +25,6 @@ import ( "github.com/go-kit/log/level" "github.com/golang/snappy" "github.com/klauspost/compress/zstd" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" ) @@ -135,7 +135,7 @@ func (r *LiveReader) Next() bool { switch ok, err := r.buildRecord(); { case ok: return true - case err != nil && err != io.EOF: + case err != nil && !errors.Is(err, io.EOF): r.err = err return false } @@ -157,7 +157,7 @@ func (r *LiveReader) Next() bool { if r.writeIndex != pageSize { n, err := r.fillBuffer() - if n == 0 || (err != nil && err != io.EOF) { + if n == 0 || (err != nil && !errors.Is(err, io.EOF)) { r.err = err return false } @@ -265,7 +265,7 @@ func validateRecord(typ recType, i int) error { } return nil default: - return errors.Errorf("unexpected record type %d", typ) + return fmt.Errorf("unexpected record type %d", typ) } } @@ -322,7 +322,7 @@ func (r *LiveReader) readRecord() ([]byte, int, error) { rec := r.buf[r.readIndex+recordHeaderSize : r.readIndex+recordHeaderSize+length] if c := crc32.Checksum(rec, castagnoliTable); c != crc { - return nil, 0, errors.Errorf("unexpected checksum %x, expected %x", c, crc) + return nil, 0, fmt.Errorf("unexpected checksum %x, expected %x", c, crc) } return rec, length + recordHeaderSize, nil diff --git a/tsdb/wlog/reader.go b/tsdb/wlog/reader.go index f77b03b8e..a744b0cc4 100644 --- a/tsdb/wlog/reader.go +++ 
b/tsdb/wlog/reader.go @@ -16,12 +16,13 @@ package wlog import ( "encoding/binary" + "errors" + "fmt" "hash/crc32" "io" "github.com/golang/snappy" "github.com/klauspost/compress/zstd" - "github.com/pkg/errors" ) // Reader reads WAL records from an io.Reader. @@ -47,7 +48,7 @@ func NewReader(r io.Reader) *Reader { // It must not be called again after it returned false. func (r *Reader) Next() bool { err := r.next() - if errors.Is(err, io.EOF) { + if err != nil && errors.Is(err, io.EOF) { // The last WAL segment record shouldn't be torn(should be full or last). // The last record would be torn after a crash just before // the last record part could be persisted to disk. @@ -72,7 +73,7 @@ func (r *Reader) next() (err error) { i := 0 for { if _, err = io.ReadFull(r.rdr, hdr[:1]); err != nil { - return errors.Wrap(err, "read first header byte") + return fmt.Errorf("read first header byte: %w", err) } r.total++ r.curRecTyp = recTypeFromHeader(hdr[0]) @@ -95,7 +96,7 @@ func (r *Reader) next() (err error) { } n, err := io.ReadFull(r.rdr, buf[:k]) if err != nil { - return errors.Wrap(err, "read remaining zeros") + return fmt.Errorf("read remaining zeros: %w", err) } r.total += int64(n) @@ -108,7 +109,7 @@ func (r *Reader) next() (err error) { } n, err := io.ReadFull(r.rdr, hdr[1:]) if err != nil { - return errors.Wrap(err, "read remaining header") + return fmt.Errorf("read remaining header: %w", err) } r.total += int64(n) @@ -118,7 +119,7 @@ func (r *Reader) next() (err error) { ) if length > pageSize-recordHeaderSize { - return errors.Errorf("invalid record size %d", length) + return fmt.Errorf("invalid record size %d", length) } n, err = io.ReadFull(r.rdr, buf[:length]) if err != nil { @@ -127,10 +128,10 @@ func (r *Reader) next() (err error) { r.total += int64(n) if n != int(length) { - return errors.Errorf("invalid size: expected %d, got %d", length, n) + return fmt.Errorf("invalid size: expected %d, got %d", length, n) } if c := crc32.Checksum(buf[:length], 
castagnoliTable); c != crc { - return errors.Errorf("unexpected checksum %x, expected %x", c, crc) + return fmt.Errorf("unexpected checksum %x, expected %x", c, crc) } if isSnappyCompressed || isZstdCompressed { diff --git a/tsdb/wlog/wlog.go b/tsdb/wlog/wlog.go index 16924d249..c4305bcbf 100644 --- a/tsdb/wlog/wlog.go +++ b/tsdb/wlog/wlog.go @@ -17,6 +17,7 @@ package wlog import ( "bufio" "encoding/binary" + "errors" "fmt" "hash/crc32" "io" @@ -30,7 +31,6 @@ import ( "github.com/go-kit/log/level" "github.com/golang/snappy" "github.com/klauspost/compress/zstd" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "golang.org/x/exp/slices" @@ -137,7 +137,7 @@ func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { level.Warn(logger).Log("msg", "Last page of the wlog is torn, filling it with zeros", "segment", segName) if _, err := f.Write(make([]byte, pageSize-d)); err != nil { f.Close() - return nil, errors.Wrap(err, "zero-pad torn page") + return nil, fmt.Errorf("zero-pad torn page: %w", err) } } return &Segment{SegmentFile: f, i: k, dir: dir}, nil @@ -298,7 +298,7 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi return nil, errors.New("invalid segment size") } if err := os.MkdirAll(dir, 0o777); err != nil { - return nil, errors.Wrap(err, "create dir") + return nil, fmt.Errorf("create dir: %w", err) } if logger == nil { logger = log.NewNopLogger() @@ -331,7 +331,7 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi _, last, err := Segments(w.Dir()) if err != nil { - return nil, errors.Wrap(err, "get segment range") + return nil, fmt.Errorf("get segment range: %w", err) } // Index of the Segment we want to open and write to. @@ -414,11 +414,9 @@ func (w *WL) Repair(origErr error) error { // But that's not generally applicable if the records have any kind of causality. 
// Maybe as an extra mode in the future if mid-WAL corruptions become // a frequent concern. - err := errors.Cause(origErr) // So that we can pick up errors even if wrapped. - - cerr, ok := err.(*CorruptionErr) - if !ok { - return errors.Wrap(origErr, "cannot handle error") + var cerr *CorruptionErr + if !errors.As(origErr, &cerr) { + return fmt.Errorf("cannot handle error: %w", origErr) } if cerr.Segment < 0 { return errors.New("corruption error does not specify position") @@ -429,7 +427,7 @@ func (w *WL) Repair(origErr error) error { // All segments behind the corruption can no longer be used. segs, err := listSegments(w.Dir()) if err != nil { - return errors.Wrap(err, "list segments") + return fmt.Errorf("list segments: %w", err) } level.Warn(w.logger).Log("msg", "Deleting all segments newer than corrupted segment", "segment", cerr.Segment) @@ -440,14 +438,14 @@ func (w *WL) Repair(origErr error) error { // as we set the current segment to repaired file // below. if err := w.segment.Close(); err != nil { - return errors.Wrap(err, "close active segment") + return fmt.Errorf("close active segment: %w", err) } } if s.index <= cerr.Segment { continue } if err := os.Remove(filepath.Join(w.Dir(), s.name)); err != nil { - return errors.Wrapf(err, "delete segment:%v", s.index) + return fmt.Errorf("delete segment:%v: %w", s.index, err) } } // Regardless of the corruption offset, no record reaches into the previous segment. @@ -472,7 +470,7 @@ func (w *WL) Repair(origErr error) error { f, err := os.Open(tmpfn) if err != nil { - return errors.Wrap(err, "open segment") + return fmt.Errorf("open segment: %w", err) } defer f.Close() @@ -484,24 +482,24 @@ func (w *WL) Repair(origErr error) error { break } if err := w.Log(r.Record()); err != nil { - return errors.Wrap(err, "insert record") + return fmt.Errorf("insert record: %w", err) } } // We expect an error here from r.Err(), so nothing to handle. 
// We need to pad to the end of the last page in the repaired segment if err := w.flushPage(true); err != nil { - return errors.Wrap(err, "flush page in repair") + return fmt.Errorf("flush page in repair: %w", err) } // We explicitly close even when there is a defer for Windows to be // able to delete it. The defer is in place to close it in-case there // are errors above. if err := f.Close(); err != nil { - return errors.Wrap(err, "close corrupted file") + return fmt.Errorf("close corrupted file: %w", err) } if err := os.Remove(tmpfn); err != nil { - return errors.Wrap(err, "delete corrupted segment") + return fmt.Errorf("delete corrupted segment: %w", err) } // Explicitly close the segment we just repaired to avoid issues with Windows. @@ -553,7 +551,7 @@ func (w *WL) nextSegment(async bool) (int, error) { } next, err := CreateSegment(w.Dir(), w.segment.Index()+1) if err != nil { - return 0, errors.Wrap(err, "create new segment file") + return 0, fmt.Errorf("create new segment file: %w", err) } prev := w.segment if err := w.setSegment(next); err != nil { @@ -940,7 +938,7 @@ func NewSegmentsRangeReader(sr ...SegmentRange) (io.ReadCloser, error) { for _, sgmRange := range sr { refs, err := listSegments(sgmRange.Dir) if err != nil { - return nil, errors.Wrapf(err, "list segment in dir:%v", sgmRange.Dir) + return nil, fmt.Errorf("list segment in dir:%v: %w", sgmRange.Dir, err) } for _, r := range refs { @@ -952,7 +950,7 @@ func NewSegmentsRangeReader(sr ...SegmentRange) (io.ReadCloser, error) { } s, err := OpenReadSegment(filepath.Join(sgmRange.Dir, r.name)) if err != nil { - return nil, errors.Wrapf(err, "open segment:%v in dir:%v", r.name, sgmRange.Dir) + return nil, fmt.Errorf("open segment:%v in dir:%v: %w", r.name, sgmRange.Dir, err) } segs = append(segs, s) } @@ -1017,7 +1015,7 @@ func (r *segmentBufReader) Read(b []byte) (n int, err error) { r.off += n // If we succeeded, or hit a non-EOF, we can stop. 
- if err == nil || err != io.EOF { + if err == nil || !errors.Is(err, io.EOF) { return n, err } From a32fbc3658016a92834417695e80f511ed576ce1 Mon Sep 17 00:00:00 2001 From: machine424 Date: Wed, 8 Nov 2023 14:19:12 +0100 Subject: [PATCH 559/632] head.go: Remove an unneeded snapshot trigger that was moved in https://github.com/prometheus/prometheus/pull/9328 and brougt back by mistake in 095f572d4a855fa5c3492fd98c0459abbff91a07 as part of https://github.com/prometheus/prometheus/pull/11447 Signed-off-by: machine424 --- tsdb/head.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/tsdb/head.go b/tsdb/head.go index ee0ffcb8d..d096bc631 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -1593,9 +1593,6 @@ func (h *Head) Close() error { h.mmapHeadChunks() errs := tsdb_errors.NewMulti(h.chunkDiskMapper.Close()) - if errs.Err() == nil && h.opts.EnableMemorySnapshotOnShutdown { - errs.Add(h.performChunkSnapshot()) - } if h.wal != nil { errs.Add(h.wal.Close()) } From 071d5732afdb68fce428cb985ea13e176b9a876e Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 27 Oct 2023 21:41:04 +0100 Subject: [PATCH 560/632] TSDB: refactor cleanup of chunks and series Extract the middle of the loop into a function, so it will be easier to modify the `seriesHashmap` data structure. Signed-off-by: Bryan Boreham --- tsdb/head.go | 119 ++++++++++++++++++++++++++------------------------- 1 file changed, 61 insertions(+), 58 deletions(-) diff --git a/tsdb/head.go b/tsdb/head.go index ee0ffcb8d..c46ffe061 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -1777,70 +1777,73 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) ( deletedFromPrevStripe = 0 ) minMmapFile = math.MaxInt32 - // Run through all series and truncate old chunks. Mark those with no - // chunks left as deleted and store their ID. + + // For one series, truncate old chunks and check if any chunks left. If not, mark as deleted and collect the ID. 
+ check := func(i int, hash uint64, series *memSeries, deletedForCallback map[chunks.HeadSeriesRef]labels.Labels) { + series.Lock() + defer series.Unlock() + + rmChunks += series.truncateChunksBefore(mint, minOOOMmapRef) + + if len(series.mmappedChunks) > 0 { + seq, _ := series.mmappedChunks[0].ref.Unpack() + if seq < minMmapFile { + minMmapFile = seq + } + } + if series.ooo != nil && len(series.ooo.oooMmappedChunks) > 0 { + seq, _ := series.ooo.oooMmappedChunks[0].ref.Unpack() + if seq < minMmapFile { + minMmapFile = seq + } + for _, ch := range series.ooo.oooMmappedChunks { + if ch.minTime < minOOOTime { + minOOOTime = ch.minTime + } + } + } + if series.ooo != nil && series.ooo.oooHeadChunk != nil { + if series.ooo.oooHeadChunk.minTime < minOOOTime { + minOOOTime = series.ooo.oooHeadChunk.minTime + } + } + if len(series.mmappedChunks) > 0 || series.headChunks != nil || series.pendingCommit || + (series.ooo != nil && (len(series.ooo.oooMmappedChunks) > 0 || series.ooo.oooHeadChunk != nil)) { + seriesMint := series.minTime() + if seriesMint < actualMint { + actualMint = seriesMint + } + return + } + // The series is gone entirely. We need to keep the series lock + // and make sure we have acquired the stripe locks for hash and ID of the + // series alike. + // If we don't hold them all, there's a very small chance that a series receives + // samples again while we are half-way into deleting it. + j := int(series.ref) & (s.size - 1) + + if i != j { + s.locks[j].Lock() + } + + deleted[storage.SeriesRef(series.ref)] = struct{}{} + s.hashes[i].del(hash, series.lset) + delete(s.series[j], series.ref) + deletedForCallback[series.ref] = series.lset + + if i != j { + s.locks[j].Unlock() + } + } + + // Run through all series shard by shard, checking which should be deleted. 
for i := 0; i < s.size; i++ { deletedForCallback := make(map[chunks.HeadSeriesRef]labels.Labels, deletedFromPrevStripe) s.locks[i].Lock() for hash, all := range s.hashes[i] { for _, series := range all { - series.Lock() - rmChunks += series.truncateChunksBefore(mint, minOOOMmapRef) - - if len(series.mmappedChunks) > 0 { - seq, _ := series.mmappedChunks[0].ref.Unpack() - if seq < minMmapFile { - minMmapFile = seq - } - } - if series.ooo != nil && len(series.ooo.oooMmappedChunks) > 0 { - seq, _ := series.ooo.oooMmappedChunks[0].ref.Unpack() - if seq < minMmapFile { - minMmapFile = seq - } - for _, ch := range series.ooo.oooMmappedChunks { - if ch.minTime < minOOOTime { - minOOOTime = ch.minTime - } - } - } - if series.ooo != nil && series.ooo.oooHeadChunk != nil { - if series.ooo.oooHeadChunk.minTime < minOOOTime { - minOOOTime = series.ooo.oooHeadChunk.minTime - } - } - if len(series.mmappedChunks) > 0 || series.headChunks != nil || series.pendingCommit || - (series.ooo != nil && (len(series.ooo.oooMmappedChunks) > 0 || series.ooo.oooHeadChunk != nil)) { - seriesMint := series.minTime() - if seriesMint < actualMint { - actualMint = seriesMint - } - series.Unlock() - continue - } - - // The series is gone entirely. We need to keep the series lock - // and make sure we have acquired the stripe locks for hash and ID of the - // series alike. - // If we don't hold them all, there's a very small chance that a series receives - // samples again while we are half-way into deleting it. 
- j := int(series.ref) & (s.size - 1) - - if i != j { - s.locks[j].Lock() - } - - deleted[storage.SeriesRef(series.ref)] = struct{}{} - s.hashes[i].del(hash, series.lset) - delete(s.series[j], series.ref) - deletedForCallback[series.ref] = series.lset - - if i != j { - s.locks[j].Unlock() - } - - series.Unlock() + check(i, hash, series, deletedForCallback) } } From ce4e757704866ad16bd456e1140cdfe45b380601 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 27 Oct 2023 22:02:34 +0100 Subject: [PATCH 561/632] TSDB: refine variable naming in chunk gc Slight further refactor. Signed-off-by: Bryan Boreham --- tsdb/head.go | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/tsdb/head.go b/tsdb/head.go index c46ffe061..575177a7a 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -1779,7 +1779,7 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) ( minMmapFile = math.MaxInt32 // For one series, truncate old chunks and check if any chunks left. If not, mark as deleted and collect the ID. - check := func(i int, hash uint64, series *memSeries, deletedForCallback map[chunks.HeadSeriesRef]labels.Labels) { + check := func(hashShard int, hash uint64, series *memSeries, deletedForCallback map[chunks.HeadSeriesRef]labels.Labels) { series.Lock() defer series.Unlock() @@ -1820,20 +1820,16 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) ( // series alike. // If we don't hold them all, there's a very small chance that a series receives // samples again while we are half-way into deleting it. 
- j := int(series.ref) & (s.size - 1) - - if i != j { - s.locks[j].Lock() + refShard := int(series.ref) & (s.size - 1) + if hashShard != refShard { + s.locks[refShard].Lock() + defer s.locks[refShard].Unlock() } deleted[storage.SeriesRef(series.ref)] = struct{}{} - s.hashes[i].del(hash, series.lset) - delete(s.series[j], series.ref) + s.hashes[hashShard].del(hash, series.lset) + delete(s.series[refShard], series.ref) deletedForCallback[series.ref] = series.lset - - if i != j { - s.locks[j].Unlock() - } } // Run through all series shard by shard, checking which should be deleted. From e6c0f69f98f1125fdb9f4db8fc6f5d13629fad28 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 27 Oct 2023 22:05:30 +0100 Subject: [PATCH 562/632] TSDB: Only pay for hash collisions when they happen Instead of a map of slices of `*memSeries`, ready for any of them to hold series where hash values collide, split into a map of `*memSeries` and a map of slices which is usually empty, since hash collisions are a one-in-a-billion thing. The `del` method gets more complicated, to maintain the invariant that a series is only in one of the two maps. Signed-off-by: Bryan Boreham --- tsdb/head.go | 75 +++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 54 insertions(+), 21 deletions(-) diff --git a/tsdb/head.go b/tsdb/head.go index 575177a7a..410a226d8 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -1667,26 +1667,34 @@ func (h *Head) mmapHeadChunks() { var count int for i := 0; i < h.series.size; i++ { h.series.locks[i].RLock() - for _, all := range h.series.hashes[i] { - for _, series := range all { - series.Lock() - count += series.mmapChunks(h.chunkDiskMapper) - series.Unlock() - } + for _, series := range h.series.series[i] { + series.Lock() + count += series.mmapChunks(h.chunkDiskMapper) + series.Unlock() } h.series.locks[i].RUnlock() } h.metrics.mmapChunksTotal.Add(float64(count)) } -// seriesHashmap is a simple hashmap for memSeries by their label set. 
It is built -// on top of a regular hashmap and holds a slice of series to resolve hash collisions. +// seriesHashmap lets TSDB find a memSeries by its label set, via a 64-bit hash. +// There is one map for the common case where the hash value is unique, and a +// second map for the case that two series have the same hash value. +// Each series is in only one of the maps. // Its methods require the hash to be submitted with it to avoid re-computations throughout // the code. -type seriesHashmap map[uint64][]*memSeries +type seriesHashmap struct { + unique map[uint64]*memSeries + conflicts map[uint64][]*memSeries +} -func (m seriesHashmap) get(hash uint64, lset labels.Labels) *memSeries { - for _, s := range m[hash] { +func (m *seriesHashmap) get(hash uint64, lset labels.Labels) *memSeries { + if s, found := m.unique[hash]; found { + if labels.Equal(s.lset, lset) { + return s + } + } + for _, s := range m.conflicts[hash] { if labels.Equal(s.lset, lset) { return s } @@ -1695,27 +1703,46 @@ func (m seriesHashmap) get(hash uint64, lset labels.Labels) *memSeries { } func (m seriesHashmap) set(hash uint64, s *memSeries) { - l := m[hash] + if existing, found := m.unique[hash]; !found || labels.Equal(existing.lset, s.lset) { + m.unique[hash] = s + return + } + l := m.conflicts[hash] for i, prev := range l { if labels.Equal(prev.lset, s.lset) { l[i] = s return } } - m[hash] = append(l, s) + m.conflicts[hash] = append(l, s) } func (m seriesHashmap) del(hash uint64, lset labels.Labels) { var rem []*memSeries - for _, s := range m[hash] { - if !labels.Equal(s.lset, lset) { - rem = append(rem, s) + unique, found := m.unique[hash] + switch { + case !found: + return + case labels.Equal(unique.lset, lset): + conflicts := m.conflicts[hash] + if len(conflicts) == 0 { + delete(m.unique, hash) + return + } + rem = conflicts + default: + rem = append(rem, unique) + for _, s := range m.conflicts[hash] { + if !labels.Equal(s.lset, lset) { + rem = append(rem, s) + } } } - if len(rem) == 0 
{ - delete(m, hash) + m.unique[hash] = rem[0] + if len(rem) == 1 { + delete(m.conflicts, hash) } else { - m[hash] = rem + m.conflicts[hash] = rem[1:] } } @@ -1757,7 +1784,10 @@ func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *st s.series[i] = map[chunks.HeadSeriesRef]*memSeries{} } for i := range s.hashes { - s.hashes[i] = seriesHashmap{} + s.hashes[i] = seriesHashmap{ + unique: map[uint64]*memSeries{}, + conflicts: map[uint64][]*memSeries{}, + } } return s } @@ -1837,7 +1867,10 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) ( deletedForCallback := make(map[chunks.HeadSeriesRef]labels.Labels, deletedFromPrevStripe) s.locks[i].Lock() - for hash, all := range s.hashes[i] { + for hash, series := range s.hashes[i].unique { + check(i, hash, series, deletedForCallback) + } + for hash, all := range s.hashes[i].conflicts { for _, series := range all { check(i, hash, series, deletedForCallback) } From 0996b78326fd8d41ba32e4670085599b5e63f542 Mon Sep 17 00:00:00 2001 From: machine424 Date: Thu, 9 Nov 2023 15:38:35 +0100 Subject: [PATCH 563/632] remote_write: add a unit test to make sure the write client sends the extra http headers as expected This will help letting prometheus off the hook from situations like https://github.com/prometheus/prometheus/issues/13030 Signed-off-by: machine424 --- storage/remote/client_test.go | 40 +++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/storage/remote/client_test.go b/storage/remote/client_test.go index 33ae7e468..2acb8e279 100644 --- a/storage/remote/client_test.go +++ b/storage/remote/client_test.go @@ -168,3 +168,43 @@ func TestRetryAfterDuration(t *testing.T) { require.Equal(t, c.expected, retryAfterDuration(c.tInput), c.name) } } + +func TestClientHeaders(t *testing.T) { + headersToSend := map[string]string{"Foo": "Bar", "Baz": "qux"} + + var called bool + server := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) { + called = true + receivedHeaders := r.Header + for name, value := range headersToSend { + require.Equal( + t, + []string{value}, + receivedHeaders.Values(name), + "expected %v to be part of the received headers %v", + headersToSend, + receivedHeaders, + ) + } + }), + ) + defer server.Close() + + serverURL, err := url.Parse(server.URL) + require.NoError(t, err) + + conf := &ClientConfig{ + URL: &config_util.URL{URL: serverURL}, + Timeout: model.Duration(time.Second), + Headers: headersToSend, + } + + c, err := NewWriteClient("c", conf) + require.NoError(t, err) + + err = c.Store(context.Background(), []byte{}, 0) + require.NoError(t, err) + + require.True(t, called, "The remote server wasn't called") +} From 0fe34f6d788be5ede6bc0d705b80343e12298b9c Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Thu, 9 Nov 2023 10:18:00 -0600 Subject: [PATCH 564/632] Follow-up to #13060: Add test to ensure staleness tracking This commit introduces an additional test in `scrape_test.go` to verify staleness tracking when `trackTimestampStaleness` is enabled. The new `TestScrapeLoopAppendStalenessIfTrackTimestampStaleness` function asserts that the scrape loop correctly appends staleness markers when necessary, reflecting the expected behavior with the feature flag turned on. The previous tests were only testing end of scrape staleness. 
Signed-off-by: Julien Pivotto --- scrape/scrape_test.go | 51 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index ccd651f49..a2e0d00c6 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -2038,6 +2038,57 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) { require.Equal(t, want, app.resultFloats, "Appended samples not as expected:\n%s", appender) } +func TestScrapeLoopAppendStalenessIfTrackTimestampStaleness(t *testing.T) { + app := &collectResultAppender{} + sl := newScrapeLoop(context.Background(), + nil, nil, nil, + nopMutator, + nopMutator, + func(ctx context.Context) storage.Appender { return app }, + nil, + 0, + true, + true, + 0, 0, + nil, + 0, + 0, + false, + false, + false, + nil, + false, + newTestScrapeMetrics(t), + ) + + now := time.Now() + slApp := sl.appender(context.Background()) + _, _, _, err := sl.append(slApp, []byte("metric_a 1 1000\n"), "", now) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + + slApp = sl.appender(context.Background()) + _, _, _, err = sl.append(slApp, []byte(""), "", now.Add(time.Second)) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + + // DeepEqual will report NaNs as being different, so replace with a different value. 
+ app.resultFloats[1].f = 42 + want := []floatSample{ + { + metric: labels.FromStrings(model.MetricNameLabel, "metric_a"), + t: 1000, + f: 1, + }, + { + metric: labels.FromStrings(model.MetricNameLabel, "metric_a"), + t: timestamp.FromTime(now.Add(time.Second)), + f: 42, + }, + } + require.Equal(t, want, app.resultFloats, "Appended samples not as expected:\n%s", appender) +} + func TestScrapeLoopAppendExemplar(t *testing.T) { tests := []struct { title string From 2972cc5e8f655fbfb6a73e33465bfd0abb132835 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Thu, 9 Nov 2023 22:07:35 +0100 Subject: [PATCH 565/632] tsdb/index: use Go standard errors package Signed-off-by: Matthieu MOREL --- tsdb/index/index.go | 117 ++++++++++++++++++------------------ tsdb/index/index_test.go | 6 +- tsdb/index/postings.go | 4 +- tsdb/index/postings_test.go | 2 +- 4 files changed, 66 insertions(+), 63 deletions(-) diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 893167c25..74212bced 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -28,7 +28,6 @@ import ( "sort" "unsafe" - "github.com/pkg/errors" "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" @@ -172,7 +171,7 @@ func NewTOCFromByteSlice(bs ByteSlice) (*TOC, error) { d := encoding.Decbuf{B: b[:len(b)-4]} if d.Crc32(castagnoliTable) != expCRC { - return nil, errors.Wrap(encoding.ErrInvalidChecksum, "read TOC") + return nil, fmt.Errorf("read TOC: %w", encoding.ErrInvalidChecksum) } toc := &TOC{ @@ -197,7 +196,7 @@ func NewWriter(ctx context.Context, fn string) (*Writer, error) { defer df.Close() // Close for platform windows. if err := os.RemoveAll(fn); err != nil { - return nil, errors.Wrap(err, "remove any existing index at path") + return nil, fmt.Errorf("remove any existing index at path: %w", err) } // Main index file we are building. 
@@ -216,7 +215,7 @@ func NewWriter(ctx context.Context, fn string) (*Writer, error) { return nil, err } if err := df.Sync(); err != nil { - return nil, errors.Wrap(err, "sync dir") + return nil, fmt.Errorf("sync dir: %w", err) } iw := &Writer{ @@ -288,7 +287,7 @@ func (fw *FileWriter) Write(bufs ...[]byte) error { // Once we move to compressed/varint representations in those areas, this limitation // can be lifted. if fw.pos > 16*math.MaxUint32 { - return errors.Errorf("%q exceeding max size of 64GiB", fw.name) + return fmt.Errorf("%q exceeding max size of 64GiB", fw.name) } } return nil @@ -315,7 +314,7 @@ func (fw *FileWriter) AddPadding(size int) error { p = uint64(size) - p if err := fw.Write(make([]byte, p)); err != nil { - return errors.Wrap(err, "add padding") + return fmt.Errorf("add padding: %w", err) } return nil } @@ -353,7 +352,7 @@ func (w *Writer) ensureStage(s indexWriterStage) error { } } if w.stage > s { - return errors.Errorf("invalid stage %q, currently at %q", s, w.stage) + return fmt.Errorf("invalid stage %q, currently at %q", s, w.stage) } // Mark start of sections in table of contents. @@ -417,20 +416,20 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... return err } if labels.Compare(lset, w.lastSeries) <= 0 { - return errors.Errorf("out-of-order series added with label set %q", lset) + return fmt.Errorf("out-of-order series added with label set %q", lset) } if ref < w.lastRef && !w.lastSeries.IsEmpty() { - return errors.Errorf("series with reference greater than %d already added", ref) + return fmt.Errorf("series with reference greater than %d already added", ref) } // We add padding to 16 bytes to increase the addressable space we get through 4 byte // series references. 
if err := w.addPadding(16); err != nil { - return errors.Errorf("failed to write padding bytes: %v", err) + return fmt.Errorf("failed to write padding bytes: %v", err) } if w.f.pos%16 != 0 { - return errors.Errorf("series write not 16-byte aligned at %d", w.f.pos) + return fmt.Errorf("series write not 16-byte aligned at %d", w.f.pos) } w.buf2.Reset() @@ -443,7 +442,7 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... if !ok { nameIndex, err = w.symbols.ReverseLookup(l.Name) if err != nil { - return errors.Errorf("symbol entry for %q does not exist, %v", l.Name, err) + return fmt.Errorf("symbol entry for %q does not exist, %v", l.Name, err) } } w.labelNames[l.Name]++ @@ -453,7 +452,7 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... if !ok || cacheEntry.lastValue != l.Value { valueIndex, err = w.symbols.ReverseLookup(l.Value) if err != nil { - return errors.Errorf("symbol entry for %q does not exist, %v", l.Value, err) + return fmt.Errorf("symbol entry for %q does not exist, %v", l.Value, err) } w.symbolCache[l.Name] = symbolCacheEntry{ index: nameIndex, @@ -493,7 +492,7 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... w.buf2.PutHash(w.crc32) if err := w.write(w.buf1.Get(), w.buf2.Get()); err != nil { - return errors.Wrap(err, "write series data") + return fmt.Errorf("write series data: %w", err) } w.lastSeries.CopyFrom(lset) @@ -514,7 +513,7 @@ func (w *Writer) AddSymbol(sym string) error { return err } if w.numSymbols != 0 && sym <= w.lastSymbol { - return errors.Errorf("symbol %q out-of-order", sym) + return fmt.Errorf("symbol %q out-of-order", sym) } w.lastSymbol = sym w.numSymbols++ @@ -527,7 +526,7 @@ func (w *Writer) finishSymbols() error { symbolTableSize := w.f.pos - w.toc.Symbols - 4 // The symbol table's part is 4 bytes. 
So the total symbol table size must be less than or equal to 2^32-1 if symbolTableSize > math.MaxUint32 { - return errors.Errorf("symbol table size exceeds %d bytes: %d", uint32(math.MaxUint32), symbolTableSize) + return fmt.Errorf("symbol table size exceeds %d bytes: %d", uint32(math.MaxUint32), symbolTableSize) } // Write out the length and symbol count. @@ -563,7 +562,7 @@ func (w *Writer) finishSymbols() error { // Load in the symbol table efficiently for the rest of the index writing. w.symbols, err = NewSymbols(realByteSlice(w.symbolFile.Bytes()), FormatV2, int(w.toc.Symbols)) if err != nil { - return errors.Wrap(err, "read symbols") + return fmt.Errorf("read symbols: %w", err) } return nil } @@ -660,7 +659,7 @@ func (w *Writer) writeLabelIndex(name string, values []uint32) error { w.buf1.Reset() l := w.f.pos - startPos - 4 if l > math.MaxUint32 { - return errors.Errorf("label index size exceeds 4 bytes: %d", l) + return fmt.Errorf("label index size exceeds 4 bytes: %d", l) } w.buf1.PutBE32int(int(l)) if err := w.writeAt(w.buf1.Get(), startPos); err != nil { @@ -704,7 +703,7 @@ func (w *Writer) writeLabelIndexesOffsetTable() error { w.buf1.Reset() l := w.f.pos - startPos - 4 if l > math.MaxUint32 { - return errors.Errorf("label indexes offset table size exceeds 4 bytes: %d", l) + return fmt.Errorf("label indexes offset table size exceeds 4 bytes: %d", l) } w.buf1.PutBE32int(int(l)) if err := w.writeAt(w.buf1.Get(), startPos); err != nil { @@ -785,7 +784,7 @@ func (w *Writer) writePostingsOffsetTable() error { w.buf1.Reset() l := w.f.pos - startPos - 4 if l > math.MaxUint32 { - return errors.Errorf("postings offset table size exceeds 4 bytes: %d", l) + return fmt.Errorf("postings offset table size exceeds 4 bytes: %d", l) } w.buf1.PutBE32int(int(l)) if err := w.writeAt(w.buf1.Get(), startPos); err != nil { @@ -839,7 +838,7 @@ func (w *Writer) writePostingsToTmpFiles() error { d.ConsumePadding() startPos := w.toc.LabelIndices - uint64(d.Len()) if startPos%16 != 
0 { - return errors.Errorf("series not 16-byte aligned at %d", startPos) + return fmt.Errorf("series not 16-byte aligned at %d", startPos) } offsets = append(offsets, uint32(startPos/16)) // Skip to next series. @@ -964,7 +963,7 @@ func (w *Writer) writePosting(name, value string, offs []uint32) error { for _, off := range offs { if off > (1<<32)-1 { - return errors.Errorf("series offset %d exceeds 4 bytes", off) + return fmt.Errorf("series offset %d exceeds 4 bytes", off) } w.buf1.PutBE32(off) } @@ -973,7 +972,7 @@ func (w *Writer) writePosting(name, value string, offs []uint32) error { l := w.buf1.Len() // We convert to uint to make code compile on 32-bit systems, as math.MaxUint32 doesn't fit into int there. if uint(l) > math.MaxUint32 { - return errors.Errorf("posting size exceeds 4 bytes: %d", l) + return fmt.Errorf("posting size exceeds 4 bytes: %d", l) } w.buf2.PutBE32int(l) w.buf1.PutHash(w.crc32) @@ -1000,7 +999,7 @@ func (w *Writer) writePostings() error { return err } if uint64(n) != w.fP.pos { - return errors.Errorf("wrote %d bytes to posting temporary file, but only read back %d", w.fP.pos, n) + return fmt.Errorf("wrote %d bytes to posting temporary file, but only read back %d", w.fP.pos, n) } w.f.pos += uint64(n) @@ -1135,26 +1134,26 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) { // Verify header. 
if r.b.Len() < HeaderLen { - return nil, errors.Wrap(encoding.ErrInvalidSize, "index header") + return nil, fmt.Errorf("index header: %w", encoding.ErrInvalidSize) } if m := binary.BigEndian.Uint32(r.b.Range(0, 4)); m != MagicIndex { - return nil, errors.Errorf("invalid magic number %x", m) + return nil, fmt.Errorf("invalid magic number %x", m) } r.version = int(r.b.Range(4, 5)[0]) if r.version != FormatV1 && r.version != FormatV2 { - return nil, errors.Errorf("unknown index file version %d", r.version) + return nil, fmt.Errorf("unknown index file version %d", r.version) } var err error r.toc, err = NewTOCFromByteSlice(b) if err != nil { - return nil, errors.Wrap(err, "read TOC") + return nil, fmt.Errorf("read TOC: %w", err) } r.symbols, err = NewSymbols(r.b, r.version, int(r.toc.Symbols)) if err != nil { - return nil, errors.Wrap(err, "read symbols") + return nil, fmt.Errorf("read symbols: %w", err) } if r.version == FormatV1 { @@ -1169,7 +1168,7 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) { r.postingsV1[string(name)][string(value)] = off return nil }); err != nil { - return nil, errors.Wrap(err, "read postings table") + return nil, fmt.Errorf("read postings table: %w", err) } } else { var lastName, lastValue []byte @@ -1197,7 +1196,7 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) { valueCount++ return nil }); err != nil { - return nil, errors.Wrap(err, "read postings table") + return nil, fmt.Errorf("read postings table: %w", err) } if lastName != nil { r.postings[string(lastName)] = append(r.postings[string(lastName)], postingOffset{value: string(lastValue), off: lastOff}) @@ -1217,7 +1216,7 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) { } off, err := r.symbols.ReverseLookup(k) if err != nil { - return nil, errors.Wrap(err, "reverse symbol lookup") + return nil, fmt.Errorf("reverse symbol lookup: %w", err) } r.nameSymbols[off] = k } @@ -1252,7 +1251,7 @@ func (r *Reader) PostingsRanges() (map[labels.Label]Range, 
error) { } return nil }); err != nil { - return nil, errors.Wrap(err, "read postings table") + return nil, fmt.Errorf("read postings table: %w", err) } return m, nil } @@ -1302,7 +1301,7 @@ func (s Symbols) Lookup(o uint32) (string, error) { if s.version == FormatV2 { if int(o) >= s.seen { - return "", errors.Errorf("unknown symbol offset %d", o) + return "", fmt.Errorf("unknown symbol offset %d", o) } d.Skip(s.offsets[int(o/symbolFactor)]) // Walk until we find the one we want. @@ -1321,7 +1320,7 @@ func (s Symbols) Lookup(o uint32) (string, error) { func (s Symbols) ReverseLookup(sym string) (uint32, error) { if len(s.offsets) == 0 { - return 0, errors.Errorf("unknown symbol %q - no symbols", sym) + return 0, fmt.Errorf("unknown symbol %q - no symbols", sym) } i := sort.Search(len(s.offsets), func(i int) bool { // Any decoding errors here will be lost, however @@ -1354,7 +1353,7 @@ func (s Symbols) ReverseLookup(sym string) (uint32, error) { return 0, d.Err() } if lastSymbol != sym { - return 0, errors.Errorf("unknown symbol %q", sym) + return 0, fmt.Errorf("unknown symbol %q", sym) } if s.version == FormatV2 { return uint32(res), nil @@ -1413,7 +1412,7 @@ func ReadPostingsOffsetTable(bs ByteSlice, off uint64, f func(name, value []byte offsetPos := startLen - d.Len() if keyCount := d.Uvarint(); keyCount != 2 { - return errors.Errorf("unexpected number of keys for postings offset table %d", keyCount) + return fmt.Errorf("unexpected number of keys for postings offset table %d", keyCount) } name := d.UvarintBytes() value := d.UvarintBytes() @@ -1468,7 +1467,7 @@ func (r *Reader) SortedLabelValues(ctx context.Context, name string, matchers .. // TODO(replay): Support filtering by matchers. 
func (r *Reader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { if len(matchers) > 0 { - return nil, errors.Errorf("matchers parameter is not implemented: %+v", matchers) + return nil, fmt.Errorf("matchers parameter is not implemented: %+v", matchers) } if r.version == FormatV1 { @@ -1516,7 +1515,7 @@ func (r *Reader) LabelValues(ctx context.Context, name string, matchers ...*labe d.Uvarint64() // Offset. } if d.Err() != nil { - return nil, errors.Wrap(d.Err(), "get postings offset entry") + return nil, fmt.Errorf("get postings offset entry: %w", d.Err()) } return values, ctx.Err() @@ -1542,12 +1541,12 @@ func (r *Reader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([ d := encoding.NewDecbufUvarintAt(r.b, int(offset), castagnoliTable) buf := d.Get() if d.Err() != nil { - return nil, errors.Wrap(d.Err(), "get buffer for series") + return nil, fmt.Errorf("get buffer for series: %w", d.Err()) } offsets, err := r.dec.LabelNamesOffsetsFor(buf) if err != nil { - return nil, errors.Wrap(err, "get label name offsets") + return nil, fmt.Errorf("get label name offsets: %w", err) } for _, off := range offsets { offsetsMap[off] = struct{}{} @@ -1559,7 +1558,7 @@ func (r *Reader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([ for off := range offsetsMap { name, err := r.lookupSymbol(ctx, off) if err != nil { - return nil, errors.Wrap(err, "lookup symbol in LabelNamesFor") + return nil, fmt.Errorf("lookup symbol in LabelNamesFor: %w", err) } names = append(names, name) } @@ -1580,7 +1579,7 @@ func (r *Reader) LabelValueFor(ctx context.Context, id storage.SeriesRef, label d := encoding.NewDecbufUvarintAt(r.b, int(offset), castagnoliTable) buf := d.Get() if d.Err() != nil { - return "", errors.Wrap(d.Err(), "label values for") + return "", fmt.Errorf("label values for: %w", d.Err()) } value, err := r.dec.LabelValueFor(ctx, buf, label) @@ -1607,7 +1606,11 @@ func (r *Reader) Series(id 
storage.SeriesRef, builder *labels.ScratchBuilder, ch if d.Err() != nil { return d.Err() } - return errors.Wrap(r.dec.Series(d.Get(), builder, chks), "read series") + err := r.dec.Series(d.Get(), builder, chks) + if err != nil { + return fmt.Errorf("read series: %w", err) + } + return nil } func (r *Reader) Postings(ctx context.Context, name string, values ...string) (Postings, error) { @@ -1626,7 +1629,7 @@ func (r *Reader) Postings(ctx context.Context, name string, values ...string) (P d := encoding.NewDecbufAt(r.b, int(postingsOff), castagnoliTable) _, p, err := r.dec.Postings(d.Get()) if err != nil { - return nil, errors.Wrap(err, "decode postings") + return nil, fmt.Errorf("decode postings: %w", err) } res = append(res, p) } @@ -1688,7 +1691,7 @@ func (r *Reader) Postings(ctx context.Context, name string, values ...string) (P d2 := encoding.NewDecbufAt(r.b, int(postingsOff), castagnoliTable) _, p, err := r.dec.Postings(d2.Get()) if err != nil { - return nil, errors.Wrap(err, "decode postings") + return nil, fmt.Errorf("decode postings: %w", err) } res = append(res, p) } @@ -1704,10 +1707,10 @@ func (r *Reader) Postings(ctx context.Context, name string, values ...string) (P } } if d.Err() != nil { - return nil, errors.Wrap(d.Err(), "get postings offset entry") + return nil, fmt.Errorf("get postings offset entry: %w", d.Err()) } if ctx.Err() != nil { - return nil, errors.Wrap(ctx.Err(), "get postings offset entry") + return nil, fmt.Errorf("get postings offset entry: %w", ctx.Err()) } } @@ -1729,7 +1732,7 @@ func (r *Reader) Size() int64 { // TODO(twilkie) implement support for matchers. 
func (r *Reader) LabelNames(_ context.Context, matchers ...*labels.Matcher) ([]string, error) { if len(matchers) > 0 { - return nil, errors.Errorf("matchers parameter is not implemented: %+v", matchers) + return nil, fmt.Errorf("matchers parameter is not implemented: %+v", matchers) } labelNames := make([]string, 0, len(r.postings)) @@ -1800,7 +1803,7 @@ func (dec *Decoder) LabelNamesOffsetsFor(b []byte) ([]uint32, error) { _ = d.Uvarint() // skip the label value if d.Err() != nil { - return nil, errors.Wrap(d.Err(), "read series label offsets") + return nil, fmt.Errorf("read series label offsets: %w", d.Err()) } } @@ -1817,18 +1820,18 @@ func (dec *Decoder) LabelValueFor(ctx context.Context, b []byte, label string) ( lvo := uint32(d.Uvarint()) if d.Err() != nil { - return "", errors.Wrap(d.Err(), "read series label offsets") + return "", fmt.Errorf("read series label offsets: %w", d.Err()) } ln, err := dec.LookupSymbol(ctx, lno) if err != nil { - return "", errors.Wrap(err, "lookup label name") + return "", fmt.Errorf("lookup label name: %w", err) } if ln == label { lv, err := dec.LookupSymbol(ctx, lvo) if err != nil { - return "", errors.Wrap(err, "lookup label value") + return "", fmt.Errorf("lookup label value: %w", err) } return lv, nil @@ -1853,16 +1856,16 @@ func (dec *Decoder) Series(b []byte, builder *labels.ScratchBuilder, chks *[]chu lvo := uint32(d.Uvarint()) if d.Err() != nil { - return errors.Wrap(d.Err(), "read series label offsets") + return fmt.Errorf("read series label offsets: %w", d.Err()) } ln, err := dec.LookupSymbol(context.TODO(), lno) if err != nil { - return errors.Wrap(err, "lookup label name") + return fmt.Errorf("lookup label name: %w", err) } lv, err := dec.LookupSymbol(context.TODO(), lvo) if err != nil { - return errors.Wrap(err, "lookup label value") + return fmt.Errorf("lookup label value: %w", err) } builder.Add(ln, lv) @@ -1894,7 +1897,7 @@ func (dec *Decoder) Series(b []byte, builder *labels.ScratchBuilder, chks *[]chu t0 = maxt 
if d.Err() != nil { - return errors.Wrapf(d.Err(), "read meta for chunk %d", i) + return fmt.Errorf("read meta for chunk %d: %w", i, d.Err()) } *chks = append(*chks, chunks.Meta{ diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go index 7a6683da2..6c5e313d4 100644 --- a/tsdb/index/index_test.go +++ b/tsdb/index/index_test.go @@ -15,6 +15,7 @@ package index import ( "context" + "errors" "fmt" "hash/crc32" "math/rand" @@ -23,7 +24,6 @@ import ( "sort" "testing" - "github.com/pkg/errors" "github.com/stretchr/testify/require" "go.uber.org/goleak" @@ -66,7 +66,7 @@ func (m mockIndex) Symbols() (map[string]struct{}, error) { func (m mockIndex) AddSeries(ref storage.SeriesRef, l labels.Labels, chunks ...chunks.Meta) error { if _, ok := m.series[ref]; ok { - return errors.Errorf("series with reference %d already added", ref) + return fmt.Errorf("series with reference %d already added", ref) } l.Range(func(lbl labels.Label) { m.symbols[lbl.Name] = struct{}{} @@ -115,7 +115,7 @@ func (m mockIndex) Postings(ctx context.Context, name string, values ...string) func (m mockIndex) SortedPostings(p Postings) Postings { ep, err := ExpandPostings(p) if err != nil { - return ErrPostings(errors.Wrap(err, "expand postings")) + return ErrPostings(fmt.Errorf("expand postings: %w", err)) } sort.Slice(ep, func(i, j int) bool { diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index f79a8d4cf..c83957427 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -17,12 +17,12 @@ import ( "container/heap" "context" "encoding/binary" + "fmt" "runtime" "sort" "strings" "sync" - "github.com/pkg/errors" "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" @@ -927,7 +927,7 @@ func (h *postingsWithIndexHeap) next() error { } if err := pi.p.Err(); err != nil { - return errors.Wrapf(err, "postings %d", pi.index) + return fmt.Errorf("postings %d: %w", pi.index, err) } h.popIndex() return nil diff --git a/tsdb/index/postings_test.go 
b/tsdb/index/postings_test.go index 783b5f84f..04282c332 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -17,13 +17,13 @@ import ( "container/heap" "context" "encoding/binary" + "errors" "fmt" "math/rand" "sort" "strconv" "testing" - "github.com/pkg/errors" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/labels" From 80d2f992ae791756de8e21cf25c2e1acbf99ca3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jacob=20Baung=C3=A5rd=20Hansen?= Date: Fri, 10 Nov 2023 11:22:32 +0100 Subject: [PATCH 566/632] codemirror-promql: Add request header to client (#13118) With this commit we make it possible to adjust the request headers sent to Prometheus by the codemirror-promql extension. This enables customizing the headers sent, without re-implementing the Prometheus client completely. Signed-off-by: Jacob Baungard Hansen --- web/ui/module/codemirror-promql/README.md | 9 +++++++++ .../module/codemirror-promql/src/client/prometheus.ts | 10 ++++++++++ 2 files changed, 19 insertions(+) diff --git a/web/ui/module/codemirror-promql/README.md b/web/ui/module/codemirror-promql/README.md index 627e4fe15..8fb188e86 100644 --- a/web/ui/module/codemirror-promql/README.md +++ b/web/ui/module/codemirror-promql/README.md @@ -161,6 +161,15 @@ You can change it to use the HTTP method `GET` if you prefer. 
const promQL = new PromQLExtension().setComplete({remote: {httpMethod: 'GET'}}) ``` +###### HTTP request headers + +If you need to send specific HTTP headers along with the requests to Prometheus, you can adjust that as follows: + +```typescript +const customHeaders = new Headers({'header-name': 'test-value'}); +const promql = new PromQLExtension().setComplete({remote: {requestHeaders: customHeaders}}) +``` + ###### Override the API Prefix The default Prometheus Client, when building the query to get data from Prometheus, is using an API prefix which is by diff --git a/web/ui/module/codemirror-promql/src/client/prometheus.ts b/web/ui/module/codemirror-promql/src/client/prometheus.ts index a9c7f7456..873cbb0d2 100644 --- a/web/ui/module/codemirror-promql/src/client/prometheus.ts +++ b/web/ui/module/codemirror-promql/src/client/prometheus.ts @@ -58,6 +58,7 @@ export interface PrometheusConfig { cache?: CacheConfig; httpMethod?: 'POST' | 'GET'; apiPrefix?: string; + requestHeaders?: Headers; } interface APIResponse { @@ -84,6 +85,7 @@ export class HTTPPrometheusClient implements PrometheusClient { // For some reason, just assigning via "= fetch" here does not end up executing fetch correctly // when calling it, thus the indirection via another function wrapper. private readonly fetchFn: FetchFn = (input: RequestInfo, init?: RequestInit): Promise => fetch(input, init); + private requestHeaders: Headers = new Headers(); constructor(config: PrometheusConfig) { this.url = config.url ? 
config.url : ''; @@ -100,6 +102,9 @@ export class HTTPPrometheusClient implements PrometheusClient { if (config.apiPrefix) { this.apiPrefix = config.apiPrefix; } + if (config.requestHeaders) { + this.requestHeaders = config.requestHeaders; + } } labelNames(metricName?: string): Promise { @@ -221,6 +226,11 @@ export class HTTPPrometheusClient implements PrometheusClient { } private fetchAPI(resource: string, init?: RequestInit): Promise { + if (init) { + init.headers = this.requestHeaders; + } else { + init = { headers: this.requestHeaders }; + } return this.fetchFn(this.url + resource, init) .then((res) => { if (!res.ok && ![badRequest, unprocessableEntity, serviceUnavailable].includes(res.status)) { From e250f09b5d34d6c936b18f3b7699df23a0555092 Mon Sep 17 00:00:00 2001 From: Ziqi Zhao Date: Fri, 10 Nov 2023 21:33:34 +0800 Subject: [PATCH 567/632] change origin schema in `ReduceResolution` method of histogram and float histogram (#13116) * change origin schema in ReduceResolution method of histogram and float histogram Signed-off-by: Ziqi Zhao --------- Signed-off-by: Ziqi Zhao --- model/histogram/float_histogram.go | 2 +- model/histogram/float_histogram_test.go | 43 +++++++++++++++++++++++++ model/histogram/histogram.go | 1 + model/histogram/histogram_test.go | 43 +++++++++++++++++++++++++ 4 files changed, 88 insertions(+), 1 deletion(-) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 212b02880..e0f5d208e 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -1112,6 +1112,6 @@ func floatBucketsMatch(b1, b2 []float64) bool { func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram { h.PositiveSpans, h.PositiveBuckets = reduceResolution(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, false) h.NegativeSpans, h.NegativeBuckets = reduceResolution(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, false) - + h.Schema = targetSchema return h } diff 
--git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index 6f445c0cf..bfe3525fa 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -2442,3 +2442,46 @@ func createRandomSpans(rng *rand.Rand, spanNum int32) ([]Span, []float64) { } return Spans, Buckets } + +func TestFloatHistogramReduceResolution(t *testing.T) { + tcs := map[string]struct { + origin *FloatHistogram + target *FloatHistogram + }{ + "valid float histogram": { + origin: &FloatHistogram{ + Schema: 0, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 3, Length: 2}, + }, + PositiveBuckets: []float64{1, 3, 1, 2, 1, 1}, + NegativeSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 3, Length: 2}, + }, + NegativeBuckets: []float64{1, 3, 1, 2, 1, 1}, + }, + target: &FloatHistogram{ + Schema: -1, + PositiveSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 1}, + }, + PositiveBuckets: []float64{1, 4, 2, 2}, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 1}, + }, + NegativeBuckets: []float64{1, 4, 2, 2}, + }, + }, + } + + for _, tc := range tcs { + target := tc.origin.ReduceResolution(tc.target.Schema) + require.Equal(t, tc.target, target) + } +} diff --git a/model/histogram/histogram.go b/model/histogram/histogram.go index 4699bd3cb..3ebb27fbc 100644 --- a/model/histogram/histogram.go +++ b/model/histogram/histogram.go @@ -503,5 +503,6 @@ func (h *Histogram) ReduceResolution(targetSchema int32) *Histogram { h.NegativeSpans, h.NegativeBuckets = reduceResolution( h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, true, ) + h.Schema = targetSchema return h } diff --git a/model/histogram/histogram_test.go b/model/histogram/histogram_test.go index 6f12f53e8..5aa9ca6fe 100644 --- a/model/histogram/histogram_test.go +++ b/model/histogram/histogram_test.go @@ -967,3 +967,46 @@ func BenchmarkHistogramValidation(b *testing.B) { 
require.NoError(b, h.Validate()) } } + +func TestHistogramReduceResolution(t *testing.T) { + tcs := map[string]struct { + origin *Histogram + target *Histogram + }{ + "valid histogram": { + origin: &Histogram{ + Schema: 0, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 3, Length: 2}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0}, + NegativeSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 3, Length: 2}, + }, + NegativeBuckets: []int64{1, 2, -2, 1, -1, 0}, + }, + target: &Histogram{ + Schema: -1, + PositiveSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 1}, + }, + PositiveBuckets: []int64{1, 3, -2, 0}, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 1}, + }, + NegativeBuckets: []int64{1, 3, -2, 0}, + }, + }, + } + + for _, tc := range tcs { + target := tc.origin.ReduceResolution(tc.target.Schema) + require.Equal(t, tc.target, target) + } +} From b94f32f6fa3b619498dd6e1dec9677ab9d30e7de Mon Sep 17 00:00:00 2001 From: Ziqi Zhao Date: Sat, 11 Nov 2023 22:24:47 +0800 Subject: [PATCH 568/632] automatically reduce resolution rather than fail scrape Signed-off-by: Ziqi Zhao --- scrape/target.go | 16 ++++++++++++---- scrape/target_test.go | 5 +++++ 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/scrape/target.go b/scrape/target.go index 8cc8597a4..ed4c598b7 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -366,13 +366,21 @@ type bucketLimitAppender struct { func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { if h != nil { - if len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit { - return 0, errBucketLimit + for len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit { + lastSum := len(h.PositiveBuckets) + len(h.NegativeBuckets) + h = h.ReduceResolution(h.Schema - 1) + if 
len(h.PositiveBuckets)+len(h.NegativeBuckets) == lastSum { + return 0, errBucketLimit + } } } if fh != nil { - if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit { - return 0, errBucketLimit + for len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit { + lastSum := len(fh.PositiveBuckets) + len(fh.NegativeBuckets) + fh = fh.ReduceResolution(fh.Schema - 1) + if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) == lastSum { + return 0, errBucketLimit + } } } ref, err := app.Appender.AppendHistogram(ref, lset, t, h, fh) diff --git a/scrape/target_test.go b/scrape/target_test.go index 4f0c840cd..f676b85c0 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -517,6 +517,11 @@ func TestBucketLimitAppender(t *testing.T) { limit: 3, expectError: true, }, + { + h: example, + limit: 4, + expectError: false, + }, { h: example, limit: 10, From 39a35d92bcc476f9c1072e9fb797b168c73826c6 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Sat, 11 Nov 2023 17:30:16 +0100 Subject: [PATCH 569/632] tsdb/head: wlog exemplars after samples (#13113) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When samples are committed in the head, they are also written to the WAL. The order of WAL records should be sample then exemplar, but this was not the case for native histogram samples. This PR fixes that. The problem with the wrong order is that remote write reads the WAL and sends the recorded timeseries in the WAL order, which means exemplars arrived before histogram samples. If the receiving side is Prometheus TSDB and the series has not existed before then the exemplar does not currently create the series. Which means the exemplar is rejected and lost. 
Signed-off-by: György Krajcsovits --- tsdb/head_append.go | 20 +++++++++------- tsdb/head_test.go | 57 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+), 8 deletions(-) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 3663c800a..785e99db0 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -689,14 +689,6 @@ func (a *headAppender) log() error { return errors.Wrap(err, "log samples") } } - if len(a.exemplars) > 0 { - rec = enc.Exemplars(exemplarsForEncoding(a.exemplars), buf) - buf = rec[:0] - - if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log exemplars") - } - } if len(a.histograms) > 0 { rec = enc.HistogramSamples(a.histograms, buf) buf = rec[:0] @@ -711,6 +703,18 @@ func (a *headAppender) log() error { return errors.Wrap(err, "log float histograms") } } + // Exemplars should be logged after samples (float/native histogram/etc), + // otherwise it might happen that we send the exemplars in a remote write + // batch before the samples, which in turn means the exemplar is rejected + // for missing series, since series are created due to samples. 
+ if len(a.exemplars) > 0 { + rec = enc.Exemplars(exemplarsForEncoding(a.exemplars), buf) + buf = rec[:0] + + if err := a.head.wal.Log(rec); err != nil { + return errors.Wrap(err, "log exemplars") + } + } return nil } diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 1216dd0a6..253f92d61 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -22,6 +22,7 @@ import ( "os" "path" "path/filepath" + "reflect" "sort" "strconv" "strings" @@ -190,6 +191,10 @@ func readTestWAL(t testing.TB, dir string) (recs []interface{}) { meta, err := dec.Metadata(rec, nil) require.NoError(t, err) recs = append(recs, meta) + case record.Exemplars: + exemplars, err := dec.Exemplars(rec, nil) + require.NoError(t, err) + recs = append(recs, exemplars) default: t.Fatalf("unknown record type") } @@ -5457,3 +5462,55 @@ func TestHeadDetectsDuplicateSampleAtSizeLimit(t *testing.T) { require.Equal(t, numSamples/2, storedSampleCount) } + +func TestWALSampleAndExemplarOrder(t *testing.T) { + lbls := labels.FromStrings("foo", "bar") + testcases := map[string]struct { + appendF func(app storage.Appender, ts int64) (storage.SeriesRef, error) + expectedType reflect.Type + }{ + "float sample": { + appendF: func(app storage.Appender, ts int64) (storage.SeriesRef, error) { + return app.Append(0, lbls, ts, 1.0) + }, + expectedType: reflect.TypeOf([]record.RefSample{}), + }, + "histogram sample": { + appendF: func(app storage.Appender, ts int64) (storage.SeriesRef, error) { + return app.AppendHistogram(0, lbls, ts, tsdbutil.GenerateTestHistogram(1), nil) + }, + expectedType: reflect.TypeOf([]record.RefHistogramSample{}), + }, + "float histogram sample": { + appendF: func(app storage.Appender, ts int64) (storage.SeriesRef, error) { + return app.AppendHistogram(0, lbls, ts, nil, tsdbutil.GenerateTestFloatHistogram(1)) + }, + expectedType: reflect.TypeOf([]record.RefFloatHistogramSample{}), + }, + } + + for testName, tc := range testcases { + t.Run(testName, func(t *testing.T) { + h, w := 
newTestHead(t, 1000, wlog.CompressionNone, false) + defer func() { + require.NoError(t, h.Close()) + }() + + app := h.Appender(context.Background()) + ref, err := tc.appendF(app, 10) + require.NoError(t, err) + app.AppendExemplar(ref, lbls, exemplar.Exemplar{Value: 1.0, Ts: 5}) + + app.Commit() + + recs := readTestWAL(t, w.Dir()) + require.Len(t, recs, 3) + _, ok := recs[0].([]record.RefSeries) + require.True(t, ok, "expected first record to be a RefSeries") + actualType := reflect.TypeOf(recs[1]) + require.Equal(t, tc.expectedType, actualType, "expected second record to be a %s", tc.expectedType) + _, ok = recs[2].([]record.RefExemplar) + require.True(t, ok, "expected third record to be a RefExemplar") + }) + } +} From 4d6d3c171566f38450598685f7584ece3ad65692 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Sat, 11 Nov 2023 19:01:11 +0100 Subject: [PATCH 570/632] tsdb/encoding: use Go standard errors package Signed-off-by: Matthieu MOREL --- tsdb/encoding/encoding.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tsdb/encoding/encoding.go b/tsdb/encoding/encoding.go index ab97876a3..cd98fbd82 100644 --- a/tsdb/encoding/encoding.go +++ b/tsdb/encoding/encoding.go @@ -15,13 +15,14 @@ package encoding import ( "encoding/binary" + "errors" + "fmt" "hash" "hash/crc32" "math" "unsafe" "github.com/dennwc/varint" - "github.com/pkg/errors" ) var ( @@ -153,7 +154,7 @@ func NewDecbufUvarintAt(bs ByteSlice, off int, castagnoliTable *crc32.Table) Dec l, n := varint.Uvarint(b) if n <= 0 || n > binary.MaxVarintLen32 { - return Decbuf{E: errors.Errorf("invalid uvarint %d", n)} + return Decbuf{E: fmt.Errorf("invalid uvarint %d", n)} } if bs.Len() < off+n+int(l)+4 { From 118460a64fc3662755bfa63a0f882fdfbda0c86b Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Sat, 11 Nov 2023 19:19:50 +0100 Subject: [PATCH 571/632] tsdb/tombstones: use Go standard errors package Signed-off-by: Matthieu MOREL --- tsdb/tombstones/tombstones.go | 14 +++++++------- 1 file 
changed, 7 insertions(+), 7 deletions(-) diff --git a/tsdb/tombstones/tombstones.go b/tsdb/tombstones/tombstones.go index 94daf5195..f7884f9bf 100644 --- a/tsdb/tombstones/tombstones.go +++ b/tsdb/tombstones/tombstones.go @@ -15,6 +15,7 @@ package tombstones import ( "encoding/binary" + "errors" "fmt" "hash" "hash/crc32" @@ -26,7 +27,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/encoding" @@ -109,17 +109,17 @@ func WriteFile(logger log.Logger, dir string, tr Reader) (int64, error) { bytes, err := Encode(tr) if err != nil { - return 0, errors.Wrap(err, "encoding tombstones") + return 0, fmt.Errorf("encoding tombstones: %w", err) } // Ignore first byte which is the format type. We do this for compatibility. if _, err := hash.Write(bytes[tombstoneFormatVersionSize:]); err != nil { - return 0, errors.Wrap(err, "calculating hash for tombstones") + return 0, fmt.Errorf("calculating hash for tombstones: %w", err) } n, err = f.Write(bytes) if err != nil { - return 0, errors.Wrap(err, "writing tombstones") + return 0, fmt.Errorf("writing tombstones: %w", err) } size += n @@ -161,7 +161,7 @@ func Encode(tr Reader) ([]byte, error) { func Decode(b []byte) (Reader, error) { d := &encoding.Decbuf{B: b} if flag := d.Byte(); flag != tombstoneFormatV1 { - return nil, errors.Errorf("invalid tombstone format %x", flag) + return nil, fmt.Errorf("invalid tombstone format %x", flag) } if d.Err() != nil { @@ -199,7 +199,7 @@ func ReadTombstones(dir string) (Reader, int64, error) { } if len(b) < tombstonesHeaderSize { - return nil, 0, errors.Wrap(encoding.ErrInvalidSize, "tombstones header") + return nil, 0, fmt.Errorf("tombstones header", encoding.ErrInvalidSize) } d := &encoding.Decbuf{B: b[:len(b)-tombstonesCRCSize]} @@ -211,7 +211,7 @@ func ReadTombstones(dir string) (Reader, int64, error) { hash := newCRC32() // Ignore first byte which is the format type. 
if _, err := hash.Write(d.Get()[tombstoneFormatVersionSize:]); err != nil { - return nil, 0, errors.Wrap(err, "write to hash") + return nil, 0, fmt.Errorf("write to hash: %w", err) } if binary.BigEndian.Uint32(b[len(b)-tombstonesCRCSize:]) != hash.Sum32() { return nil, 0, errors.New("checksum did not match") From c74b7ad4fba0c91b1ff2e38adc1ab548f98a64f0 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Sat, 11 Nov 2023 19:22:06 +0100 Subject: [PATCH 572/632] Update tombstones.go Signed-off-by: Matthieu MOREL --- tsdb/tombstones/tombstones.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsdb/tombstones/tombstones.go b/tsdb/tombstones/tombstones.go index f7884f9bf..4cea5005d 100644 --- a/tsdb/tombstones/tombstones.go +++ b/tsdb/tombstones/tombstones.go @@ -199,7 +199,7 @@ func ReadTombstones(dir string) (Reader, int64, error) { } if len(b) < tombstonesHeaderSize { - return nil, 0, fmt.Errorf("tombstones header", encoding.ErrInvalidSize) + return nil, 0, fmt.Errorf("tombstones header: %w", encoding.ErrInvalidSize) } d := &encoding.Decbuf{B: b[:len(b)-tombstonesCRCSize]} From 63691d82a5c892ff52dde4048fa549ff35ecbf5f Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Sat, 11 Nov 2023 20:52:49 +0100 Subject: [PATCH 573/632] tsdb/record: use Go standard errors package Signed-off-by: Matthieu MOREL --- tsdb/record/record.go | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/tsdb/record/record.go b/tsdb/record/record.go index 442e6cd8c..ad4c324c6 100644 --- a/tsdb/record/record.go +++ b/tsdb/record/record.go @@ -16,10 +16,10 @@ package record import ( + "errors" + "fmt" "math" - "github.com/pkg/errors" - "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/textparse" @@ -229,7 +229,7 @@ func (d *Decoder) Series(rec []byte, series []RefSeries) ([]RefSeries, error) { return nil, dec.Err() } if len(dec.B) > 0 { - return 
nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return series, nil } @@ -272,7 +272,7 @@ func (d *Decoder) Metadata(rec []byte, metadata []RefMetadata) ([]RefMetadata, e return nil, dec.Err() } if len(dec.B) > 0 { - return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return metadata, nil } @@ -321,10 +321,10 @@ func (d *Decoder) Samples(rec []byte, samples []RefSample) ([]RefSample, error) } if dec.Err() != nil { - return nil, errors.Wrapf(dec.Err(), "decode error after %d samples", len(samples)) + return nil, fmt.Errorf("decode error after %d samples: %w", len(samples), dec.Err()) } if len(dec.B) > 0 { - return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return samples, nil } @@ -348,7 +348,7 @@ func (d *Decoder) Tombstones(rec []byte, tstones []tombstones.Stone) ([]tombston return nil, dec.Err() } if len(dec.B) > 0 { - return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return tstones, nil } @@ -386,10 +386,10 @@ func (d *Decoder) ExemplarsFromBuffer(dec *encoding.Decbuf, exemplars []RefExemp } if dec.Err() != nil { - return nil, errors.Wrapf(dec.Err(), "decode error after %d exemplars", len(exemplars)) + return nil, fmt.Errorf("decode error after %d exemplars: %w", len(exemplars), dec.Err()) } if len(dec.B) > 0 { - return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return exemplars, nil } @@ -414,10 +414,10 @@ func (d *Decoder) MmapMarkers(rec []byte, markers []RefMmapMarker) ([]RefMmapMar } if dec.Err() != nil { - return nil, errors.Wrapf(dec.Err(), "decode error after %d mmap 
markers", len(markers)) + return nil, fmt.Errorf("decode error after %d mmap markers: %w", len(markers), dec.Err()) } if len(dec.B) > 0 { - return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return markers, nil } @@ -450,10 +450,10 @@ func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) } if dec.Err() != nil { - return nil, errors.Wrapf(dec.Err(), "decode error after %d histograms", len(histograms)) + return nil, fmt.Errorf("decode error after %d histograms: %w", len(histograms), dec.Err()) } if len(dec.B) > 0 { - return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return histograms, nil } @@ -532,10 +532,10 @@ func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogr } if dec.Err() != nil { - return nil, errors.Wrapf(dec.Err(), "decode error after %d histograms", len(histograms)) + return nil, fmt.Errorf("decode error after %d histograms: %w", len(histograms), dec.Err()) } if len(dec.B) > 0 { - return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return histograms, nil } From 69c07ec6ae295c5879e672fb7de6f9715b5e73c7 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Sat, 11 Nov 2023 20:57:42 +0100 Subject: [PATCH 574/632] Update record_test.go Signed-off-by: Matthieu MOREL --- tsdb/record/record_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go index 518942314..9111350a7 100644 --- a/tsdb/record/record_test.go +++ b/tsdb/record/record_test.go @@ -15,10 +15,10 @@ package record import ( + "errors" "math/rand" "testing" - "github.com/pkg/errors" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/histogram" @@ 
-209,7 +209,7 @@ func TestRecord_Corrupted(t *testing.T) { corrupted := enc.Samples(samples, nil)[:8] _, err := dec.Samples(corrupted, nil) - require.Equal(t, errors.Cause(err), encoding.ErrInvalidSize) + require.True(t, errors.Is(err, encoding.ErrInvalidSize)) }) t.Run("Test corrupted tombstone record", func(t *testing.T) { @@ -232,7 +232,7 @@ func TestRecord_Corrupted(t *testing.T) { corrupted := enc.Exemplars(exemplars, nil)[:8] _, err := dec.Exemplars(corrupted, nil) - require.Equal(t, errors.Cause(err), encoding.ErrInvalidSize) + require.True(t, errors.Is(err, encoding.ErrInvalidSize)) }) t.Run("Test corrupted metadata record", func(t *testing.T) { @@ -242,7 +242,7 @@ func TestRecord_Corrupted(t *testing.T) { corrupted := enc.Metadata(meta, nil)[:8] _, err := dec.Metadata(corrupted, nil) - require.Equal(t, errors.Cause(err), encoding.ErrInvalidSize) + require.True(t, errors.Is(err, encoding.ErrInvalidSize)) }) t.Run("Test corrupted histogram record", func(t *testing.T) { @@ -267,7 +267,7 @@ func TestRecord_Corrupted(t *testing.T) { corrupted := enc.HistogramSamples(histograms, nil)[:8] _, err := dec.HistogramSamples(corrupted, nil) - require.Equal(t, errors.Cause(err), encoding.ErrInvalidSize) + require.True(t, errors.Is(err, encoding.ErrInvalidSize)) }) } From 469e415d09d185b6a61c90bd4c3eda821e78321b Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Sat, 11 Nov 2023 21:01:24 +0100 Subject: [PATCH 575/632] Update record.go Signed-off-by: Matthieu MOREL --- tsdb/record/record.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsdb/record/record.go b/tsdb/record/record.go index ad4c324c6..75c15c490 100644 --- a/tsdb/record/record.go +++ b/tsdb/record/record.go @@ -532,7 +532,7 @@ func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogr } if dec.Err() != nil { - return nil, fmt.Errorf("decode error after %d histograms: %w", len(histograms), dec.Err()) + return nil, fmt.Errorf("decode error after %d histograms: %w", 
len(histograms), dec.Err()) } if len(dec.B) > 0 { return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) From acc114fe553b660cefc71a0311792ef8be4a186a Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Sun, 12 Nov 2023 15:51:37 +0100 Subject: [PATCH 576/632] Fix panic during tsdb Commit (#13092) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix panic during tsdb Commit Fixes the following panic: runtime error: invalid memory address or nil pointer dereference [signal SIGSEGV: segmentation violation code=0x1 addr=0x20 pc=0x19deb45] goroutine 651118930 [running]: github.com/prometheus/prometheus/tsdb.(*headAppender).Commit(0xc19100f7c0) /drone/src/vendor/github.com/prometheus/prometheus/tsdb/head_append.go:855 +0x245 github.com/prometheus/prometheus/tsdb.dbAppender.Commit({{0x35bd6f0?, 0xc19100f7c0?}, 0xc000fa4c00?}) /drone/src/vendor/github.com/prometheus/prometheus/tsdb/db.go:1159 +0x2f We theorize that the panic happened due the the series referenced by the exemplar being removed between AppendExemplar and Commit due to being idle. Signed-off-by: György Krajcsovits --- tsdb/head_append.go | 6 ++++++ tsdb/head_test.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 785e99db0..be53a4f3f 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -751,6 +751,12 @@ func (a *headAppender) Commit() (err error) { // No errors logging to WAL, so pass the exemplars along to the in memory storage. for _, e := range a.exemplars { s := a.head.series.getByID(chunks.HeadSeriesRef(e.ref)) + if s == nil { + // This is very unlikely to happen, but we have seen it in the wild. + // It means that the series was truncated between AppendExemplar and Commit. + // See TestHeadCompactionWhileAppendAndCommitExemplar. + continue + } // We don't instrument exemplar appends here, all is instrumented by storage. 
if err := a.head.exemplars.AddExemplar(s.lset, e.exemplar); err != nil { if err == storage.ErrOutOfOrderExemplar { diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 253f92d61..f2325039a 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -5514,3 +5514,31 @@ func TestWALSampleAndExemplarOrder(t *testing.T) { }) } } + +// TestHeadCompactionWhileAppendAndCommitExemplar simulates a use case where +// a series is removed from the head while an exemplar is being appended to it. +// This can happen in theory by compacting the head at the right time due to +// a series being idle. +// The test cheats a little bit by not appending a sample with the exemplar. +// If you also add a sample and run Truncate in a concurrent goroutine and run +// the test around a million(!) times, you can get +// `unknown HeadSeriesRef when trying to add exemplar: 1` error on push. +// It is likely that running the test for much longer and with more time variations +// would trigger the +// `signal SIGSEGV: segmentation violation code=0x1 addr=0x20 pc=0xbb03d1` +// panic, that we have seen in the wild once. +func TestHeadCompactionWhileAppendAndCommitExemplar(t *testing.T) { + h, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) + app := h.Appender(context.Background()) + lbls := labels.FromStrings("foo", "bar") + ref, err := app.Append(0, lbls, 1, 1) + require.NoError(t, err) + app.Commit() + // Not adding a sample here to trigger the fault. 
+ app = h.Appender(context.Background()) + _, err = app.AppendExemplar(ref, lbls, exemplar.Exemplar{Value: 1, Ts: 20}) + require.NoError(t, err) + h.Truncate(10) + app.Commit() + h.Close() +} From 08c17df24434d289f13319928ef22f79f926d9b2 Mon Sep 17 00:00:00 2001 From: machine424 Date: Sat, 11 Nov 2023 14:07:09 +0100 Subject: [PATCH 577/632] remote/storage.go: add a test to highlight a race condition between Storage.Notify() and Storage.ApplyConfig() see https://github.com/prometheus/prometheus/issues/12747 Signed-off-by: machine424 --- storage/remote/storage_test.go | 38 ++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/storage/remote/storage_test.go b/storage/remote/storage_test.go index b2848f933..040a23a5a 100644 --- a/storage/remote/storage_test.go +++ b/storage/remote/storage_test.go @@ -14,7 +14,9 @@ package remote import ( + "fmt" "net/url" + "sync" "testing" common_config "github.com/prometheus/common/config" @@ -147,3 +149,39 @@ func baseRemoteReadConfig(host string) *config.RemoteReadConfig { } return &cfg } + +// TestWriteStorageApplyConfigsDuringCommit helps detecting races when +// ApplyConfig runs concurrently with Notify +// See https://github.com/prometheus/prometheus/issues/12747 +func TestWriteStorageApplyConfigsDuringCommit(t *testing.T) { + s := NewStorage(nil, nil, nil, t.TempDir(), defaultFlushDeadline, nil) + + var wg sync.WaitGroup + wg.Add(2000) + + start := make(chan struct{}) + for i := 0; i < 1000; i++ { + go func(i int) { + <-start + conf := &config.Config{ + GlobalConfig: config.DefaultGlobalConfig, + RemoteWriteConfigs: []*config.RemoteWriteConfig{ + baseRemoteWriteConfig(fmt.Sprintf("http://test-%d.com", i)), + }, + } + require.NoError(t, s.ApplyConfig(conf)) + wg.Done() + }(i) + } + + for i := 0; i < 1000; i++ { + go func() { + <-start + s.Notify() + wg.Done() + }() + } + + close(start) + wg.Wait() +} From 65a443e6e370c9996506fb3152db678966e7154d Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 
13 Nov 2023 16:20:04 +0000 Subject: [PATCH 578/632] TSDB: initialize conflicts map only when we need it. Suggested by @songjiayang. Signed-off-by: Bryan Boreham --- tsdb/head.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tsdb/head.go b/tsdb/head.go index 410a226d8..fb90c9fa0 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -1707,6 +1707,9 @@ func (m seriesHashmap) set(hash uint64, s *memSeries) { m.unique[hash] = s return } + if m.conflicts == nil { + m.conflicts = make(map[uint64][]*memSeries) + } l := m.conflicts[hash] for i, prev := range l { if labels.Equal(prev.lset, s.lset) { @@ -1786,7 +1789,7 @@ func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *st for i := range s.hashes { s.hashes[i] = seriesHashmap{ unique: map[uint64]*memSeries{}, - conflicts: map[uint64][]*memSeries{}, + conflicts: nil, // Initialized on demand in set(). } } return s From 41758972e49091db065e83576859efbfbc32c804 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 13 Nov 2023 18:28:48 +0000 Subject: [PATCH 579/632] web/api: optimize labelnames/values with 1 set of matchers (#12888) * web/api: optimize labelnames/values with 1 set of matchers If there is exactly one set of matchers provided, we can skip adding the results to a map and getting them back out again. 
Signed-off-by: Bryan Boreham --------- Signed-off-by: Bryan Boreham --- web/api/v1/api.go | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 34abe80aa..671df7887 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -668,7 +668,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult { names []string warnings annotations.Annotations ) - if len(matcherSets) > 0 { + if len(matcherSets) > 1 { labelNamesSet := make(map[string]struct{}) for _, matchers := range matcherSets { @@ -690,7 +690,11 @@ func (api *API) labelNames(r *http.Request) apiFuncResult { } slices.Sort(names) } else { - names, warnings, err = q.LabelNames(r.Context()) + var matchers []*labels.Matcher + if len(matcherSets) == 1 { + matchers = matcherSets[0] + } + names, warnings, err = q.LabelNames(r.Context(), matchers...) if err != nil { return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil} } @@ -744,7 +748,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { vals []string warnings annotations.Annotations ) - if len(matcherSets) > 0 { + if len(matcherSets) > 1 { var callWarnings annotations.Annotations labelValuesSet := make(map[string]struct{}) for _, matchers := range matcherSets { @@ -763,7 +767,11 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { vals = append(vals, val) } } else { - vals, warnings, err = q.LabelValues(ctx, name) + var matchers []*labels.Matcher + if len(matcherSets) == 1 { + matchers = matcherSets[0] + } + vals, warnings, err = q.LabelValues(ctx, name, matchers...) 
if err != nil { return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer} } From 413b713aa8f4ed666ea2820d351333d4bb24fa7e Mon Sep 17 00:00:00 2001 From: machine424 Date: Mon, 13 Nov 2023 13:26:02 +0100 Subject: [PATCH 580/632] remote/storage.go: adjust Storage.Notify() to avoid a race condition with Storage.ApplyConfig() Signed-off-by: machine424 --- storage/remote/storage.go | 5 +---- storage/remote/write.go | 10 ++++++++++ 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/storage/remote/storage.go b/storage/remote/storage.go index b6533f927..758ba3cc9 100644 --- a/storage/remote/storage.go +++ b/storage/remote/storage.go @@ -77,10 +77,7 @@ func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCal } func (s *Storage) Notify() { - for _, q := range s.rws.queues { - // These should all be non blocking - q.watcher.Notify() - } + s.rws.Notify() } // ApplyConfig updates the state as the new config requires. diff --git a/storage/remote/write.go b/storage/remote/write.go index 4b0a24901..237f8caa9 100644 --- a/storage/remote/write.go +++ b/storage/remote/write.go @@ -121,6 +121,16 @@ func (rws *WriteStorage) run() { } } +func (rws *WriteStorage) Notify() { + rws.mtx.Lock() + defer rws.mtx.Unlock() + + for _, q := range rws.queues { + // These should all be non blocking + q.watcher.Notify() + } +} + // ApplyConfig updates the state as the new config requires. // Only stop & create queues which have changes. func (rws *WriteStorage) ApplyConfig(conf *config.Config) error { From 1bfb3ed062e99bd3c74e05d9ff9a7fa4e30bbe21 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 14 Nov 2023 11:36:35 +0000 Subject: [PATCH 581/632] Labels: reduce allocations when creating from TSDB WAL (#13044) * Labels: reduce allocations when creating from TSDB When reading the WAL, by passing references into the buffer we can avoid copying strings under `-tags stringlabels`. 
Signed-off-by: Bryan Boreham --- model/labels/labels.go | 7 +++++++ model/labels/labels_stringlabels.go | 6 ++++++ tsdb/record/record.go | 7 +++---- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/model/labels/labels.go b/model/labels/labels.go index 3dc3049b1..231460ea3 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -617,6 +617,13 @@ func (b *ScratchBuilder) Add(name, value string) { b.add = append(b.add, Label{Name: name, Value: value}) } +// Add a name/value pair, using []byte instead of string. +// The '-tags stringlabels' version of this function is unsafe, hence the name. +// This version is safe - it copies the strings immediately - but we keep the same name so everything compiles. +func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { + b.add = append(b.add, Label{Name: string(name), Value: string(value)}) +} + // Sort the labels added so far by name. func (b *ScratchBuilder) Sort() { slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go index cc6bfcc70..bbb4452d4 100644 --- a/model/labels/labels_stringlabels.go +++ b/model/labels/labels_stringlabels.go @@ -829,6 +829,12 @@ func (b *ScratchBuilder) Add(name, value string) { b.add = append(b.add, Label{Name: name, Value: value}) } +// Add a name/value pair, using []byte instead of string to reduce memory allocations. +// The values must remain live until Labels() is called. +func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { + b.add = append(b.add, Label{Name: yoloString(name), Value: yoloString(value)}) +} + // Sort the labels added so far by name. 
func (b *ScratchBuilder) Sort() { slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) diff --git a/tsdb/record/record.go b/tsdb/record/record.go index 75c15c490..42a656dfe 100644 --- a/tsdb/record/record.go +++ b/tsdb/record/record.go @@ -279,13 +279,12 @@ func (d *Decoder) Metadata(rec []byte, metadata []RefMetadata) ([]RefMetadata, e // DecodeLabels decodes one set of labels from buf. func (d *Decoder) DecodeLabels(dec *encoding.Decbuf) labels.Labels { - // TODO: reconsider if this function could be pushed down into labels.Labels to be more efficient. d.builder.Reset() nLabels := dec.Uvarint() for i := 0; i < nLabels; i++ { - lName := dec.UvarintStr() - lValue := dec.UvarintStr() - d.builder.Add(lName, lValue) + lName := dec.UvarintBytes() + lValue := dec.UvarintBytes() + d.builder.UnsafeAddBytes(lName, lValue) } return d.builder.Labels() } From dd8871379a9af9dccc91031586c43c8c3ea1a511 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Tue, 14 Nov 2023 13:04:31 +0000 Subject: [PATCH 582/632] remplace errors.Errorf by fmt.Errorf Signed-off-by: Matthieu MOREL --- tsdb/block.go | 3 ++- tsdb/compact.go | 2 +- tsdb/db.go | 6 +++--- tsdb/db_test.go | 2 +- tsdb/head.go | 8 ++++---- tsdb/head_wal.go | 10 +++++----- tsdb/querier_test.go | 6 +++--- tsdb/repair.go | 3 ++- tsdb/wal.go | 14 +++++++------- tsdb/wlog/watcher.go | 4 ++-- 10 files changed, 30 insertions(+), 28 deletions(-) diff --git a/tsdb/block.go b/tsdb/block.go index 13a389970..b995e4fd5 100644 --- a/tsdb/block.go +++ b/tsdb/block.go @@ -17,6 +17,7 @@ package tsdb import ( "context" "encoding/json" + "fmt" "io" "os" "path/filepath" @@ -238,7 +239,7 @@ func readMetaFile(dir string) (*BlockMeta, int64, error) { return nil, 0, err } if m.Version != metaVersion1 { - return nil, 0, errors.Errorf("unexpected meta file version %d", m.Version) + return nil, 0, fmt.Errorf("unexpected meta file version %d", m.Version) } return &m, int64(len(b)), nil diff --git a/tsdb/compact.go 
b/tsdb/compact.go index f509380f8..32c88d2cc 100644 --- a/tsdb/compact.go +++ b/tsdb/compact.go @@ -151,7 +151,7 @@ func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l log.Log func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, maxBlockChunkSegmentSize int64, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { if len(ranges) == 0 { - return nil, errors.Errorf("at least one range must be provided") + return nil, fmt.Errorf("at least one range must be provided") } if pool == nil { pool = chunkenc.NewPool() diff --git a/tsdb/db.go b/tsdb/db.go index 8b3d4d300..c4c05e390 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -662,7 +662,7 @@ func (db *DBReadOnly) Block(blockID string) (BlockReader, error) { _, err := os.Stat(filepath.Join(db.dir, blockID)) if os.IsNotExist(err) { - return nil, errors.Errorf("invalid block ID %s", blockID) + return nil, fmt.Errorf("invalid block ID %s", blockID) } block, err := OpenBlock(db.logger, filepath.Join(db.dir, blockID), nil) @@ -1834,10 +1834,10 @@ func (db *DB) ForceHeadMMap() { // will create a new block containing all data that's currently in the memory buffer/WAL. 
func (db *DB) Snapshot(dir string, withHead bool) error { if dir == db.dir { - return errors.Errorf("cannot snapshot into base directory") + return fmt.Errorf("cannot snapshot into base directory") } if _, err := ulid.ParseStrict(dir); err == nil { - return errors.Errorf("dir must not be a valid ULID") + return fmt.Errorf("dir must not be a valid ULID") } db.cmtx.Lock() diff --git a/tsdb/db_test.go b/tsdb/db_test.go index f021faba9..c7ea068d6 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -3082,7 +3082,7 @@ func deleteNonBlocks(dbDir string) error { } for _, dir := range dirs { if ok := isBlockDir(dir); !ok { - return errors.Errorf("root folder:%v still hase non block directory:%v", dbDir, dir.Name()) + return fmt.Errorf("root folder:%v still hase non block directory:%v", dbDir, dir.Name()) } } return nil diff --git a/tsdb/head.go b/tsdb/head.go index d096bc631..f7e697e54 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -224,11 +224,11 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal, wbl *wlog.WL, opts *Hea // even if ooo is not enabled yet. capMax := opts.OutOfOrderCapMax.Load() if capMax <= 0 || capMax > 255 { - return nil, errors.Errorf("OOOCapMax of %d is invalid. must be > 0 and <= 255", capMax) + return nil, fmt.Errorf("OOOCapMax of %d is invalid. 
must be > 0 and <= 255", capMax) } if opts.ChunkRange < 1 { - return nil, errors.Errorf("invalid chunk range %d", opts.ChunkRange) + return nil, fmt.Errorf("invalid chunk range %d", opts.ChunkRange) } if opts.SeriesCallback == nil { opts.SeriesCallback = &noopSeriesLifecycleCallback{} @@ -857,7 +857,7 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) slice := mmappedChunks[seriesRef] if len(slice) > 0 && slice[len(slice)-1].maxTime >= mint { h.metrics.mmapChunkCorruptionTotal.Inc() - return errors.Errorf("out of sequence m-mapped chunk for series ref %d, last chunk: [%d, %d], new: [%d, %d]", + return fmt.Errorf("out of sequence m-mapped chunk for series ref %d, last chunk: [%d, %d], new: [%d, %d]", seriesRef, slice[len(slice)-1].minTime, slice[len(slice)-1].maxTime, mint, maxt) } slice = append(slice, &mmappedChunk{ @@ -872,7 +872,7 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) if len(ms.mmappedChunks) > 0 && ms.mmappedChunks[len(ms.mmappedChunks)-1].maxTime >= mint { h.metrics.mmapChunkCorruptionTotal.Inc() - return errors.Errorf("out of sequence m-mapped chunk for series ref %d, last chunk: [%d, %d], new: [%d, %d]", + return fmt.Errorf("out of sequence m-mapped chunk for series ref %d, last chunk: [%d, %d], new: [%d, %d]", seriesRef, ms.mmappedChunks[len(ms.mmappedChunks)-1].minTime, ms.mmappedChunks[len(ms.mmappedChunks)-1].maxTime, mint, maxt) } diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 34948f917..07fa8280c 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -970,7 +970,7 @@ func decodeSeriesFromChunkSnapshot(d *record.Decoder, b []byte) (csr chunkSnapsh dec := encoding.Decbuf{B: b} if flag := dec.Byte(); flag != chunkSnapshotRecordTypeSeries { - return csr, errors.Errorf("invalid record type %x", flag) + return csr, fmt.Errorf("invalid record type %x", flag) } csr.ref = chunks.HeadSeriesRef(dec.Be64()) @@ -1018,7 +1018,7 @@ func decodeSeriesFromChunkSnapshot(d 
*record.Decoder, b []byte) (csr chunkSnapsh err = dec.Err() if err != nil && len(dec.B) > 0 { - err = errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + err = fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return @@ -1041,7 +1041,7 @@ func decodeTombstonesSnapshotRecord(b []byte) (tombstones.Reader, error) { dec := encoding.Decbuf{B: b} if flag := dec.Byte(); flag != chunkSnapshotRecordTypeTombstones { - return nil, errors.Errorf("invalid record type %x", flag) + return nil, fmt.Errorf("invalid record type %x", flag) } tr, err := tombstones.Decode(dec.UvarintBytes()) @@ -1254,7 +1254,7 @@ func LastChunkSnapshot(dir string) (string, int, int, error) { continue } if !fi.IsDir() { - return "", 0, 0, errors.Errorf("chunk snapshot %s is not a directory", fi.Name()) + return "", 0, 0, fmt.Errorf("chunk snapshot %s is not a directory", fi.Name()) } splits := strings.Split(fi.Name()[len(chunkSnapshotPrefix):], ".") @@ -1492,7 +1492,7 @@ Outer: default: // This is a record type we don't understand. It is either and old format from earlier versions, // or a new format and the code was rolled back to old version. 
- loopErr = errors.Errorf("unsupported snapshot record type 0b%b", rec[0]) + loopErr = fmt.Errorf("unsupported snapshot record type 0b%b", rec[0]) break Outer } } diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 09f76034b..3c27ab2f3 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -710,7 +710,7 @@ func createFakeReaderAndNotPopulatedChunks(s ...[]chunks.Sample) (*fakeChunksRea func (r *fakeChunksReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) { chk, ok := r.chks[meta.Ref] if !ok { - return nil, errors.Errorf("chunk not found at ref %v", meta.Ref) + return nil, fmt.Errorf("chunk not found at ref %v", meta.Ref) } return chk, nil } @@ -1831,7 +1831,7 @@ func (m mockIndex) Symbols() index.StringIter { func (m *mockIndex) AddSeries(ref storage.SeriesRef, l labels.Labels, chunks ...chunks.Meta) error { if _, ok := m.series[ref]; ok { - return errors.Errorf("series with reference %d already added", ref) + return fmt.Errorf("series with reference %d already added", ref) } l.Range(func(lbl labels.Label) { m.symbols[lbl.Name] = struct{}{} @@ -1852,7 +1852,7 @@ func (m *mockIndex) AddSeries(ref storage.SeriesRef, l labels.Labels, chunks ... 
func (m mockIndex) WritePostings(name, value string, it index.Postings) error { l := labels.Label{Name: name, Value: value} if _, ok := m.postings[l]; ok { - return errors.Errorf("postings for %s already added", l) + return fmt.Errorf("postings for %s already added", l) } ep, err := index.ExpandPostings(it) if err != nil { diff --git a/tsdb/repair.go b/tsdb/repair.go index 0c2e08791..081116454 100644 --- a/tsdb/repair.go +++ b/tsdb/repair.go @@ -15,6 +15,7 @@ package tsdb import ( "encoding/json" + "fmt" "io" "os" "path/filepath" @@ -124,7 +125,7 @@ func readBogusMetaFile(dir string) (*BlockMeta, error) { return nil, err } if m.Version != metaVersion1 && m.Version != 2 { - return nil, errors.Errorf("unexpected meta file version %d", m.Version) + return nil, fmt.Errorf("unexpected meta file version %d", m.Version) } return &m, nil } diff --git a/tsdb/wal.go b/tsdb/wal.go index af83127bb..bc7db35bf 100644 --- a/tsdb/wal.go +++ b/tsdb/wal.go @@ -525,14 +525,14 @@ func (w *SegmentWAL) openSegmentFile(name string) (*os.File, error) { case err != nil: return nil, errors.Wrapf(err, "validate meta %q", f.Name()) case n != 8: - return nil, errors.Errorf("invalid header size %d in %q", n, f.Name()) + return nil, fmt.Errorf("invalid header size %d in %q", n, f.Name()) } if m := binary.BigEndian.Uint32(metab[:4]); m != WALMagic { - return nil, errors.Errorf("invalid magic header %x in %q", m, f.Name()) + return nil, fmt.Errorf("invalid magic header %x in %q", m, f.Name()) } if metab[4] != WALFormatDefault { - return nil, errors.Errorf("unknown WAL segment format %d in %q", metab[4], f.Name()) + return nil, fmt.Errorf("unknown WAL segment format %d in %q", metab[4], f.Name()) } hasError = false return f, nil @@ -1052,7 +1052,7 @@ func (e walCorruptionErr) Error() string { func (r *walReader) corruptionErr(s string, args ...interface{}) error { return walCorruptionErr{ - err: errors.Errorf(s, args...), + err: fmt.Errorf(s, args...), file: r.cur, lastOffset: r.lastOffset, } @@ 
-1124,7 +1124,7 @@ func (r *walReader) decodeSeries(flag byte, b []byte, res *[]record.RefSeries) e return dec.Err() } if len(dec.B) > 0 { - return errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return nil } @@ -1156,7 +1156,7 @@ func (r *walReader) decodeSamples(flag byte, b []byte, res *[]record.RefSample) return errors.Wrapf(dec.Err(), "decode error after %d samples", len(*res)) } if len(dec.B) > 0 { - return errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return nil } @@ -1176,7 +1176,7 @@ func (r *walReader) decodeDeletes(flag byte, b []byte, res *[]tombstones.Stone) return dec.Err() } if len(dec.B) > 0 { - return errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return nil } diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go index 221e9607c..5689602e7 100644 --- a/tsdb/wlog/watcher.go +++ b/tsdb/wlog/watcher.go @@ -747,12 +747,12 @@ func checkpointNum(dir string) (int, error) { // dir may contain a hidden directory, so only check the base directory chunks := strings.Split(filepath.Base(dir), ".") if len(chunks) != 2 { - return 0, errors.Errorf("invalid checkpoint dir string: %s", dir) + return 0, fmt.Errorf("invalid checkpoint dir string: %s", dir) } result, err := strconv.Atoi(chunks[1]) if err != nil { - return 0, errors.Errorf("invalid checkpoint dir string: %s", dir) + return 0, fmt.Errorf("invalid checkpoint dir string: %s", dir) } return result, nil From c92fbf3fdf40aa2f687c6dcfaccfc6970d382c94 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Tue, 7 Nov 2023 16:37:37 -0600 Subject: [PATCH 583/632] Add feature flag for PromQL experimental functions. This PR adds an Experimental flag to the functions. 
This can be used by https://github.com/prometheus/prometheus/pull/13059 but also xrate and other future functions. Signed-off-by: Julien Pivotto --- cmd/prometheus/main.go | 6 +- docs/command-line/prometheus.md | 2 +- docs/feature_flags.md | 9 +- promql/parser/functions.go | 12 +- promql/parser/generated_parser.y | 5 +- promql/parser/generated_parser.y.go | 187 ++++++++++++++-------------- 6 files changed, 121 insertions(+), 100 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 81699835a..4112cd842 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -63,6 +63,7 @@ import ( "github.com/prometheus/prometheus/notifier" _ "github.com/prometheus/prometheus/plugins" // Register plugins. "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/storage" @@ -199,6 +200,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { case "no-default-scrape-port": c.scrape.NoDefaultPort = true level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.") + case "promql-experimental-functions": + parser.EnableExperimentalFunctions = true + level.Info(logger).Log("msg", "Experimental PromQL functions enabled.") case "native-histograms": c.tsdb.EnableNativeHistograms = true // Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers. @@ -419,7 +423,7 @@ func main() { a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates."). Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval) - a.Flag("enable-feature", "Comma separated feature names to enable. 
Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). + a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). Default("").StringsVar(&cfg.featureList) promlogflag.AddFlags(a, &cfg.promlogConfig) diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index 78ec205f2..cd6dac555 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -52,7 +52,7 @@ The Prometheus monitoring server | --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | --query.max-samples | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | -| --enable-feature | Comma separated feature names to enable. 
Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --enable-feature | Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | | --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | --log.format | Output format of log messages. One of: [logfmt, json] | `logfmt` | diff --git a/docs/feature_flags.md b/docs/feature_flags.md index f580c959f..d57763af0 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -187,4 +187,11 @@ This should **only** be applied to metrics that currently produce such labels. The OTLP receiver allows Prometheus to accept [OpenTelemetry](https://opentelemetry.io/) metrics writes. Prometheus is best used as a Pull based system, and staleness, `up` metric, and other Pull enabled features -won't work when you push OTLP metrics. \ No newline at end of file +won't work when you push OTLP metrics. + +## Experimental PromQL functions + +`--enable-feature=promql-experimental-functions` + +Enables PromQL functions that are considered experimental and whose name or +semantics could change. 
diff --git a/promql/parser/functions.go b/promql/parser/functions.go index 45a30219e..8d9d92aa1 100644 --- a/promql/parser/functions.go +++ b/promql/parser/functions.go @@ -16,12 +16,16 @@ package parser // Function represents a function of the expression language and is // used by function nodes. type Function struct { - Name string - ArgTypes []ValueType - Variadic int - ReturnType ValueType + Name string + ArgTypes []ValueType + Variadic int + ReturnType ValueType + Experimental bool } +// EnableExperimentalFunctions controls whether experimentalFunctions are enabled. +var EnableExperimentalFunctions bool + // Functions is a list of all functions supported by PromQL, including their types. var Functions = map[string]*Function{ "abs": { diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index 676fd9fb5..dce79f769 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -22,7 +22,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/promql/parser/posrange" + "github.com/prometheus/prometheus/promql/parser/posrange" ) %} @@ -369,6 +369,9 @@ function_call : IDENTIFIER function_call_body if !exist{ yylex.(*parser).addParseErrf($1.PositionRange(),"unknown function with name %q", $1.Val) } + if fn != nil && fn.Experimental && !EnableExperimentalFunctions { + yylex.(*parser).addParseErrf($1.PositionRange(),"function %q is not enabled", $1.Val) + } $$ = &Call{ Func: fn, Args: $2.(Expressions), diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index 77a403be3..4057d9163 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -230,7 +230,7 @@ const yyEofCode = 1 const yyErrCode = 2 const yyInitialStackSize = 16 -//line promql/parser/generated_parser.y:916 +//line promql/parser/generated_parser.y:919 
//line yacctab:1 var yyExca = [...]int16{ @@ -1277,6 +1277,9 @@ yydefault: if !exist { yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "unknown function with name %q", yyDollar[1].item.Val) } + if fn != nil && fn.Experimental && !EnableExperimentalFunctions { + yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "function %q is not enabled", yyDollar[1].item.Val) + } yyVAL.node = &Call{ Func: fn, Args: yyDollar[2].node.(Expressions), @@ -1288,86 +1291,86 @@ yydefault: } case 61: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:384 +//line promql/parser/generated_parser.y:387 { yyVAL.node = yyDollar[2].node } case 62: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:386 +//line promql/parser/generated_parser.y:389 { yyVAL.node = Expressions{} } case 63: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:390 +//line promql/parser/generated_parser.y:393 { yyVAL.node = append(yyDollar[1].node.(Expressions), yyDollar[3].node.(Expr)) } case 64: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:392 +//line promql/parser/generated_parser.y:395 { yyVAL.node = Expressions{yyDollar[1].node.(Expr)} } case 65: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:394 +//line promql/parser/generated_parser.y:397 { yylex.(*parser).addParseErrf(yyDollar[2].item.PositionRange(), "trailing commas not allowed in function call args") yyVAL.node = yyDollar[1].node } case 66: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:405 +//line promql/parser/generated_parser.y:408 { yyVAL.node = &ParenExpr{Expr: yyDollar[2].node.(Expr), PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item)} } case 67: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:413 +//line promql/parser/generated_parser.y:416 { yylex.(*parser).addOffset(yyDollar[1].node, yyDollar[3].duration) yyVAL.node = yyDollar[1].node } case 68: 
yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:418 +//line promql/parser/generated_parser.y:421 { yylex.(*parser).addOffset(yyDollar[1].node, -yyDollar[4].duration) yyVAL.node = yyDollar[1].node } case 69: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:423 +//line promql/parser/generated_parser.y:426 { yylex.(*parser).unexpected("offset", "duration") yyVAL.node = yyDollar[1].node } case 70: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:430 +//line promql/parser/generated_parser.y:433 { yylex.(*parser).setTimestamp(yyDollar[1].node, yyDollar[3].float) yyVAL.node = yyDollar[1].node } case 71: yyDollar = yyS[yypt-5 : yypt+1] -//line promql/parser/generated_parser.y:435 +//line promql/parser/generated_parser.y:438 { yylex.(*parser).setAtModifierPreprocessor(yyDollar[1].node, yyDollar[3].item) yyVAL.node = yyDollar[1].node } case 72: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:440 +//line promql/parser/generated_parser.y:443 { yylex.(*parser).unexpected("@", "timestamp") yyVAL.node = yyDollar[1].node } case 75: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:450 +//line promql/parser/generated_parser.y:453 { var errMsg string vs, ok := yyDollar[1].node.(*VectorSelector) @@ -1392,7 +1395,7 @@ yydefault: } case 76: yyDollar = yyS[yypt-6 : yypt+1] -//line promql/parser/generated_parser.y:475 +//line promql/parser/generated_parser.y:478 { yyVAL.node = &SubqueryExpr{ Expr: yyDollar[1].node.(Expr), @@ -1404,35 +1407,35 @@ yydefault: } case 77: yyDollar = yyS[yypt-6 : yypt+1] -//line promql/parser/generated_parser.y:485 +//line promql/parser/generated_parser.y:488 { yylex.(*parser).unexpected("subquery selector", "\"]\"") yyVAL.node = yyDollar[1].node } case 78: yyDollar = yyS[yypt-5 : yypt+1] -//line promql/parser/generated_parser.y:487 +//line promql/parser/generated_parser.y:490 { yylex.(*parser).unexpected("subquery selector", "duration or 
\"]\"") yyVAL.node = yyDollar[1].node } case 79: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:489 +//line promql/parser/generated_parser.y:492 { yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\"") yyVAL.node = yyDollar[1].node } case 80: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:491 +//line promql/parser/generated_parser.y:494 { yylex.(*parser).unexpected("subquery selector", "duration") yyVAL.node = yyDollar[1].node } case 81: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:501 +//line promql/parser/generated_parser.y:504 { if nl, ok := yyDollar[2].node.(*NumberLiteral); ok { if yyDollar[1].item.Typ == SUB { @@ -1446,7 +1449,7 @@ yydefault: } case 82: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:519 +//line promql/parser/generated_parser.y:522 { vs := yyDollar[2].node.(*VectorSelector) vs.PosRange = mergeRanges(&yyDollar[1].item, vs) @@ -1456,7 +1459,7 @@ yydefault: } case 83: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:527 +//line promql/parser/generated_parser.y:530 { vs := &VectorSelector{ Name: yyDollar[1].item.Val, @@ -1468,7 +1471,7 @@ yydefault: } case 84: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:537 +//line promql/parser/generated_parser.y:540 { vs := yyDollar[1].node.(*VectorSelector) yylex.(*parser).assembleVectorSelector(vs) @@ -1476,7 +1479,7 @@ yydefault: } case 85: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:545 +//line promql/parser/generated_parser.y:548 { yyVAL.node = &VectorSelector{ LabelMatchers: yyDollar[2].matchers, @@ -1485,7 +1488,7 @@ yydefault: } case 86: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:552 +//line promql/parser/generated_parser.y:555 { yyVAL.node = &VectorSelector{ LabelMatchers: yyDollar[2].matchers, @@ -1494,7 +1497,7 @@ yydefault: } case 87: yyDollar = yyS[yypt-2 : yypt+1] -//line 
promql/parser/generated_parser.y:559 +//line promql/parser/generated_parser.y:562 { yyVAL.node = &VectorSelector{ LabelMatchers: []*labels.Matcher{}, @@ -1503,7 +1506,7 @@ yydefault: } case 88: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:568 +//line promql/parser/generated_parser.y:571 { if yyDollar[1].matchers != nil { yyVAL.matchers = append(yyDollar[1].matchers, yyDollar[3].matcher) @@ -1513,47 +1516,47 @@ yydefault: } case 89: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:576 +//line promql/parser/generated_parser.y:579 { yyVAL.matchers = []*labels.Matcher{yyDollar[1].matcher} } case 90: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:578 +//line promql/parser/generated_parser.y:581 { yylex.(*parser).unexpected("label matching", "\",\" or \"}\"") yyVAL.matchers = yyDollar[1].matchers } case 91: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:582 +//line promql/parser/generated_parser.y:585 { yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item) } case 92: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:584 +//line promql/parser/generated_parser.y:587 { yylex.(*parser).unexpected("label matching", "string") yyVAL.matcher = nil } case 93: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:586 +//line promql/parser/generated_parser.y:589 { yylex.(*parser).unexpected("label matching", "label matching operator") yyVAL.matcher = nil } case 94: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:588 +//line promql/parser/generated_parser.y:591 { yylex.(*parser).unexpected("label matching", "identifier or \"}\"") yyVAL.matcher = nil } case 95: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:596 +//line promql/parser/generated_parser.y:599 { b := labels.NewBuilder(yyDollar[2].labels) b.Set(labels.MetricName, 
yyDollar[1].item.Val) @@ -1561,83 +1564,83 @@ yydefault: } case 96: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:598 +//line promql/parser/generated_parser.y:601 { yyVAL.labels = yyDollar[1].labels } case 119: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:605 +//line promql/parser/generated_parser.y:608 { yyVAL.labels = labels.New(yyDollar[2].lblList...) } case 120: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:607 +//line promql/parser/generated_parser.y:610 { yyVAL.labels = labels.New(yyDollar[2].lblList...) } case 121: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:609 +//line promql/parser/generated_parser.y:612 { yyVAL.labels = labels.New() } case 122: yyDollar = yyS[yypt-0 : yypt+1] -//line promql/parser/generated_parser.y:611 +//line promql/parser/generated_parser.y:614 { yyVAL.labels = labels.New() } case 123: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:615 +//line promql/parser/generated_parser.y:618 { yyVAL.lblList = append(yyDollar[1].lblList, yyDollar[3].label) } case 124: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:617 +//line promql/parser/generated_parser.y:620 { yyVAL.lblList = []labels.Label{yyDollar[1].label} } case 125: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:619 +//line promql/parser/generated_parser.y:622 { yylex.(*parser).unexpected("label set", "\",\" or \"}\"") yyVAL.lblList = yyDollar[1].lblList } case 126: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:624 +//line promql/parser/generated_parser.y:627 { yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} } case 127: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:626 +//line promql/parser/generated_parser.y:629 { yylex.(*parser).unexpected("label set", "string") yyVAL.label = 
labels.Label{} } case 128: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:628 +//line promql/parser/generated_parser.y:631 { yylex.(*parser).unexpected("label set", "\"=\"") yyVAL.label = labels.Label{} } case 129: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:630 +//line promql/parser/generated_parser.y:633 { yylex.(*parser).unexpected("label set", "identifier or \"}\"") yyVAL.label = labels.Label{} } case 130: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:641 +//line promql/parser/generated_parser.y:644 { yylex.(*parser).generatedParserResult = &seriesDescription{ labels: yyDollar[1].labels, @@ -1646,38 +1649,38 @@ yydefault: } case 131: yyDollar = yyS[yypt-0 : yypt+1] -//line promql/parser/generated_parser.y:650 +//line promql/parser/generated_parser.y:653 { yyVAL.series = []SequenceValue{} } case 132: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:652 +//line promql/parser/generated_parser.y:655 { yyVAL.series = append(yyDollar[1].series, yyDollar[3].series...) 
} case 133: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:654 +//line promql/parser/generated_parser.y:657 { yyVAL.series = yyDollar[1].series } case 134: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:656 +//line promql/parser/generated_parser.y:659 { yylex.(*parser).unexpected("series values", "") yyVAL.series = nil } case 135: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:660 +//line promql/parser/generated_parser.y:663 { yyVAL.series = []SequenceValue{{Omitted: true}} } case 136: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:662 +//line promql/parser/generated_parser.y:665 { yyVAL.series = []SequenceValue{} for i := uint64(0); i < yyDollar[3].uint; i++ { @@ -1686,13 +1689,13 @@ yydefault: } case 137: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:669 +//line promql/parser/generated_parser.y:672 { yyVAL.series = []SequenceValue{{Value: yyDollar[1].float}} } case 138: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:671 +//line promql/parser/generated_parser.y:674 { yyVAL.series = []SequenceValue{} // Add an additional value for time 0, which we ignore in tests. @@ -1702,7 +1705,7 @@ yydefault: } case 139: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:679 +//line promql/parser/generated_parser.y:682 { yyVAL.series = []SequenceValue{} // Add an additional value for time 0, which we ignore in tests. @@ -1713,13 +1716,13 @@ yydefault: } case 140: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:689 +//line promql/parser/generated_parser.y:692 { yyVAL.series = []SequenceValue{{Histogram: yyDollar[1].histogram}} } case 141: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:693 +//line promql/parser/generated_parser.y:696 { yyVAL.series = []SequenceValue{} // Add an additional value for time 0, which we ignore in tests. 
@@ -1730,7 +1733,7 @@ yydefault: } case 142: yyDollar = yyS[yypt-5 : yypt+1] -//line promql/parser/generated_parser.y:702 +//line promql/parser/generated_parser.y:705 { val, err := yylex.(*parser).histogramsIncreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) if err != nil { @@ -1740,7 +1743,7 @@ yydefault: } case 143: yyDollar = yyS[yypt-5 : yypt+1] -//line promql/parser/generated_parser.y:710 +//line promql/parser/generated_parser.y:713 { val, err := yylex.(*parser).histogramsDecreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) if err != nil { @@ -1750,7 +1753,7 @@ yydefault: } case 144: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:720 +//line promql/parser/generated_parser.y:723 { if yyDollar[1].item.Val != "stale" { yylex.(*parser).unexpected("series values", "number or \"stale\"") @@ -1759,138 +1762,138 @@ yydefault: } case 147: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:732 +//line promql/parser/generated_parser.y:735 { yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } case 148: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:736 +//line promql/parser/generated_parser.y:739 { yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } case 149: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:740 +//line promql/parser/generated_parser.y:743 { m := yylex.(*parser).newMap() yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) } case 150: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:745 +//line promql/parser/generated_parser.y:748 { m := yylex.(*parser).newMap() yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) } case 151: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:753 +//line promql/parser/generated_parser.y:756 { yyVAL.descriptors = 
*(yylex.(*parser).mergeMaps(&yyDollar[1].descriptors, &yyDollar[3].descriptors)) } case 152: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:757 +//line promql/parser/generated_parser.y:760 { yyVAL.descriptors = yyDollar[1].descriptors } case 153: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:760 +//line promql/parser/generated_parser.y:763 { yylex.(*parser).unexpected("histogram description", "histogram description key, e.g. buckets:[5 10 7]") } case 154: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:767 +//line promql/parser/generated_parser.y:770 { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["schema"] = yyDollar[3].int } case 155: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:772 +//line promql/parser/generated_parser.y:775 { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["sum"] = yyDollar[3].float } case 156: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:777 +//line promql/parser/generated_parser.y:780 { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["count"] = yyDollar[3].float } case 157: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:782 +//line promql/parser/generated_parser.y:785 { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["z_bucket"] = yyDollar[3].float } case 158: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:787 +//line promql/parser/generated_parser.y:790 { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float } case 159: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:792 +//line promql/parser/generated_parser.y:795 { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set } case 160: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:797 +//line 
promql/parser/generated_parser.y:800 { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["offset"] = yyDollar[3].int } case 161: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:802 +//line promql/parser/generated_parser.y:805 { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set } case 162: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:807 +//line promql/parser/generated_parser.y:810 { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["n_offset"] = yyDollar[3].int } case 163: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:814 +//line promql/parser/generated_parser.y:817 { yyVAL.bucket_set = yyDollar[2].bucket_set } case 164: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:818 +//line promql/parser/generated_parser.y:821 { yyVAL.bucket_set = yyDollar[2].bucket_set } case 165: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:824 +//line promql/parser/generated_parser.y:827 { yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float) } case 166: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:828 +//line promql/parser/generated_parser.y:831 { yyVAL.bucket_set = []float64{yyDollar[1].float} } case 213: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:853 +//line promql/parser/generated_parser.y:856 { yyVAL.node = &NumberLiteral{ Val: yylex.(*parser).number(yyDollar[1].item.Val), @@ -1899,25 +1902,25 @@ yydefault: } case 214: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:861 +//line promql/parser/generated_parser.y:864 { yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val) } case 215: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:863 +//line promql/parser/generated_parser.y:866 { yyVAL.float = yyDollar[2].float } case 216: yyDollar = yyS[yypt-2 : yypt+1] 
-//line promql/parser/generated_parser.y:864 +//line promql/parser/generated_parser.y:867 { yyVAL.float = -yyDollar[2].float } case 219: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:870 +//line promql/parser/generated_parser.y:873 { var err error yyVAL.uint, err = strconv.ParseUint(yyDollar[1].item.Val, 10, 64) @@ -1927,19 +1930,19 @@ yydefault: } case 220: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:879 +//line promql/parser/generated_parser.y:882 { yyVAL.int = -int64(yyDollar[2].uint) } case 221: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:880 +//line promql/parser/generated_parser.y:883 { yyVAL.int = int64(yyDollar[1].uint) } case 222: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:884 +//line promql/parser/generated_parser.y:887 { var err error yyVAL.duration, err = parseDuration(yyDollar[1].item.Val) @@ -1949,7 +1952,7 @@ yydefault: } case 223: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:895 +//line promql/parser/generated_parser.y:898 { yyVAL.node = &StringLiteral{ Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val), @@ -1958,13 +1961,13 @@ yydefault: } case 224: yyDollar = yyS[yypt-0 : yypt+1] -//line promql/parser/generated_parser.y:908 +//line promql/parser/generated_parser.y:911 { yyVAL.duration = 0 } case 226: yyDollar = yyS[yypt-0 : yypt+1] -//line promql/parser/generated_parser.y:912 +//line promql/parser/generated_parser.y:915 { yyVAL.strings = nil } From e3041740e4d3b9cd9eda17a737e9e99102f24331 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Tue, 14 Nov 2023 19:04:30 +0100 Subject: [PATCH 584/632] tsdb/fileutil: use Go standard errors Signed-off-by: Matthieu MOREL --- tsdb/fileutil/mmap.go | 9 ++++----- tsdb/fileutil/preallocate_linux.go | 9 +++++---- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tsdb/fileutil/mmap.go b/tsdb/fileutil/mmap.go index 4dbca4f97..782ff27ec 100644 --- 
a/tsdb/fileutil/mmap.go +++ b/tsdb/fileutil/mmap.go @@ -14,9 +14,8 @@ package fileutil import ( + "fmt" "os" - - "github.com/pkg/errors" ) type MmapFile struct { @@ -31,7 +30,7 @@ func OpenMmapFile(path string) (*MmapFile, error) { func OpenMmapFileWithSize(path string, size int) (mf *MmapFile, retErr error) { f, err := os.Open(path) if err != nil { - return nil, errors.Wrap(err, "try lock file") + return nil, fmt.Errorf("try lock file: %w", err) } defer func() { if retErr != nil { @@ -41,14 +40,14 @@ func OpenMmapFileWithSize(path string, size int) (mf *MmapFile, retErr error) { if size <= 0 { info, err := f.Stat() if err != nil { - return nil, errors.Wrap(err, "stat") + return nil, fmt.Errorf("stat: %w", err) } size = int(info.Size()) } b, err := mmap(f, size) if err != nil { - return nil, errors.Wrapf(err, "mmap, size %d", size) + return nil, fmt.Errorf("mmap, size %d: %w", size, err) } return &MmapFile{f: f, b: b}, nil diff --git a/tsdb/fileutil/preallocate_linux.go b/tsdb/fileutil/preallocate_linux.go index ada046221..026c69b35 100644 --- a/tsdb/fileutil/preallocate_linux.go +++ b/tsdb/fileutil/preallocate_linux.go @@ -15,6 +15,7 @@ package fileutil import ( + "errors" "os" "syscall" ) @@ -23,10 +24,10 @@ func preallocExtend(f *os.File, sizeInBytes int64) error { // use mode = 0 to change size err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes) if err != nil { - errno, ok := err.(syscall.Errno) + var errno syscall.Errno // not supported; fallback // fallocate EINTRs frequently in some environments; fallback - if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) { + if errors.As(err, &errno) && (errno == syscall.ENOTSUP || errno == syscall.EINTR) { return preallocExtendTrunc(f, sizeInBytes) } } @@ -37,9 +38,9 @@ func preallocFixed(f *os.File, sizeInBytes int64) error { // use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes) if err != nil { - errno, ok := err.(syscall.Errno) + var errno 
syscall.Errno // treat not supported as nil error - if ok && errno == syscall.ENOTSUP { + if errors.As(err, &errno) && errno == syscall.ENOTSUP { return nil } } From e60a508dd8286a707e1db64a3327de61f2c955c4 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Tue, 14 Nov 2023 18:48:53 +0100 Subject: [PATCH 585/632] tsdb/errors: fix errorlint linter Signed-off-by: Matthieu MOREL --- tsdb/errors/errors.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tsdb/errors/errors.go b/tsdb/errors/errors.go index 21449e895..6a8e72f04 100644 --- a/tsdb/errors/errors.go +++ b/tsdb/errors/errors.go @@ -38,7 +38,8 @@ func (es *multiError) Add(errs ...error) { if err == nil { continue } - if merr, ok := err.(nonNilMultiError); ok { + var merr nonNilMultiError + if errors.As(err, &merr) { *es = append(*es, merr.errs...) continue } From d7c3bc4cb02586350ad347a6222cd0dcdd03a900 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Tue, 14 Nov 2023 20:46:36 +0100 Subject: [PATCH 586/632] tsdb/tsdbutil: use Go standard errors Signed-off-by: Matthieu MOREL --- tsdb/tsdbutil/dir_locker.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tsdb/tsdbutil/dir_locker.go b/tsdb/tsdbutil/dir_locker.go index 155f58641..fa939879c 100644 --- a/tsdb/tsdbutil/dir_locker.go +++ b/tsdb/tsdbutil/dir_locker.go @@ -14,13 +14,13 @@ package tsdbutil import ( + "errors" "fmt" "os" "path/filepath" "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" @@ -83,7 +83,7 @@ func (l *DirLocker) Lock() error { lockf, _, err := fileutil.Flock(l.path) if err != nil { - return errors.Wrap(err, "lock DB directory") + return fmt.Errorf("lock DB directory: %w", err) } l.releaser = lockf return nil From a99f48cc9f984ba9cc3d831231d42e6ba7afe745 Mon Sep 17 00:00:00 2001 From: Goutham Date: Wed, 15 Nov 2023 15:09:15 +0100 Subject: [PATCH 587/632] Bump OTel 
Collector dependency to v0.88.0 I initially didn't copy the otlptranslator/prometheus folder because I assumed it wouldn't get changes. But it did. So this PR fixes that and updates the Collector version. Supersedes: https://github.com/prometheus/prometheus/pull/12809 Signed-off-by: Goutham --- go.mod | 1 + go.sum | 2 + .../prometheus/normalize_label.go | 22 ++- .../prometheus/normalize_label_test.go | 19 -- .../prometheus/normalize_name.go | 67 +++++-- .../prometheus/normalize_name_test.go | 180 ------------------ .../prometheus/testutils_test.go | 34 ---- .../otlptranslator/prometheus/unit_to_ucum.go | 90 +++++++++ .../prometheusremotewrite/helper.go | 137 +++++++------ .../prometheusremotewrite/histograms.go | 82 ++++++-- .../prometheusremotewrite/metrics_to_prw.go | 4 +- .../number_data_points.go | 4 +- storage/remote/otlptranslator/update-copy.sh | 5 +- 13 files changed, 308 insertions(+), 339 deletions(-) delete mode 100644 storage/remote/otlptranslator/prometheus/normalize_label_test.go delete mode 100644 storage/remote/otlptranslator/prometheus/normalize_name_test.go delete mode 100644 storage/remote/otlptranslator/prometheus/testutils_test.go create mode 100644 storage/remote/otlptranslator/prometheus/unit_to_ucum.go diff --git a/go.mod b/go.mod index 44cf7c4b2..d1396dce6 100644 --- a/go.mod +++ b/go.mod @@ -55,6 +55,7 @@ require ( github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.8.4 github.com/vultr/govultr/v2 v2.17.2 + go.opentelemetry.io/collector/featuregate v0.77.0 go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 go.opentelemetry.io/collector/semconv v0.88.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 diff --git a/go.sum b/go.sum index c4658fbdb..46a1dd1f6 100644 --- a/go.sum +++ b/go.sum @@ -760,6 +760,8 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= 
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/collector/featuregate v0.77.0 h1:m1/IzaXoQh6SgF6CM80vrBOCf5zSJ2GVISfA27fYzGU= +go.opentelemetry.io/collector/featuregate v0.77.0/go.mod h1:/kVAsGUCyJXIDSgHftCN63QiwAEVHRLX2Kh/S+dqgHY= go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 h1:AgALhc2VenoA5l1DvTdg7mkzaBGqoTSuMkAtjsttBFo= go.opentelemetry.io/collector/pdata v1.0.0-rcv0017/go.mod h1:Rv9fOclA5AtM/JGm0d4jBOIAo1+jBA13UT5Bx0ovXi4= go.opentelemetry.io/collector/semconv v0.88.0 h1:8TVP4hYaUC87S6CCLKNoSxsUE0ChldE4vqotvNHHUnE= diff --git a/storage/remote/otlptranslator/prometheus/normalize_label.go b/storage/remote/otlptranslator/prometheus/normalize_label.go index 9f37c0af2..af0960e86 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_label.go +++ b/storage/remote/otlptranslator/prometheus/normalize_label.go @@ -1,21 +1,31 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package normalize +package prometheus // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus" import ( "strings" "unicode" + + "go.opentelemetry.io/collector/featuregate" ) -// Normalizes the specified label to follow Prometheus label names standard. +var dropSanitizationGate = featuregate.GlobalRegistry().MustRegister( + "pkg.translator.prometheus.PermissiveLabelSanitization", + featuregate.StageAlpha, + featuregate.WithRegisterDescription("Controls whether to change labels starting with '_' to 'key_'."), + featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/8950"), +) + +// Normalizes the specified label to follow Prometheus label names standard // // See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels // -// Labels that start with non-letter rune will be prefixed with "key_". 
+// Labels that start with non-letter rune will be prefixed with "key_" // -// Exception is made for double-underscores which are allowed. +// Exception is made for double-underscores which are allowed func NormalizeLabel(label string) string { + // Trivial case if len(label) == 0 { return label @@ -27,12 +37,14 @@ func NormalizeLabel(label string) string { // If label starts with a number, prepend with "key_" if unicode.IsDigit(rune(label[0])) { label = "key_" + label + } else if strings.HasPrefix(label, "_") && !strings.HasPrefix(label, "__") && !dropSanitizationGate.IsEnabled() { + label = "key" + label } return label } -// Return '_' for anything non-alphanumeric. +// Return '_' for anything non-alphanumeric func sanitizeRune(r rune) rune { if unicode.IsLetter(r) || unicode.IsDigit(r) { return r diff --git a/storage/remote/otlptranslator/prometheus/normalize_label_test.go b/storage/remote/otlptranslator/prometheus/normalize_label_test.go deleted file mode 100644 index 7346b20f9..000000000 --- a/storage/remote/otlptranslator/prometheus/normalize_label_test.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package normalize - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestSanitizeDropSanitization(t *testing.T) { - require.Equal(t, "", NormalizeLabel("")) - require.Equal(t, "_test", NormalizeLabel("_test")) - require.Equal(t, "key_0test", NormalizeLabel("0test")) - require.Equal(t, "test", NormalizeLabel("test")) - require.Equal(t, "test__", NormalizeLabel("test_/")) - require.Equal(t, "__test", NormalizeLabel("__test")) -} diff --git a/storage/remote/otlptranslator/prometheus/normalize_name.go b/storage/remote/otlptranslator/prometheus/normalize_name.go index b57e5a057..72fc04cea 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name.go @@ -1,21 +1,23 @@ // Copyright The OpenTelemetry 
Authors // SPDX-License-Identifier: Apache-2.0 -package normalize +package prometheus // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus" import ( "strings" "unicode" + "go.opentelemetry.io/collector/featuregate" "go.opentelemetry.io/collector/pdata/pmetric" ) -// The map to translate OTLP units to Prometheus units. +// The map to translate OTLP units to Prometheus units // OTLP metrics use the c/s notation as specified at https://ucum.org/ucum.html // (See also https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/README.md#instrument-units) // Prometheus best practices for units: https://prometheus.io/docs/practices/naming/#base-units // OpenMetrics specification for units: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#units-and-base-units var unitMap = map[string]string{ + // Time "d": "days", "h": "hours", @@ -35,11 +37,6 @@ var unitMap = map[string]string{ "MBy": "megabytes", "GBy": "gigabytes", "TBy": "terabytes", - "B": "bytes", - "KB": "kilobytes", - "MB": "megabytes", - "GB": "gigabytes", - "TB": "terabytes", // SI "m": "meters", @@ -54,11 +51,10 @@ var unitMap = map[string]string{ "Hz": "hertz", "1": "", "%": "percent", - "$": "dollars", } -// The map that translates the "per" unit. -// Example: s => per second (singular). +// The map that translates the "per" unit +// Example: s => per second (singular) var perUnitMap = map[string]string{ "s": "second", "m": "minute", @@ -69,7 +65,14 @@ var perUnitMap = map[string]string{ "y": "year", } -// Build a Prometheus-compliant metric name for the specified metric. 
+var normalizeNameGate = featuregate.GlobalRegistry().MustRegister( + "pkg.translator.prometheus.NormalizeName", + featuregate.StageBeta, + featuregate.WithRegisterDescription("Controls whether metrics names are automatically normalized to follow Prometheus naming convention"), + featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/8950"), +) + +// BuildCompliantName builds a Prometheus-compliant metric name for the specified metric // // Metric name is prefixed with specified namespace and underscore (if any). // Namespace is not cleaned up. Make sure specified namespace follows Prometheus @@ -77,7 +80,33 @@ var perUnitMap = map[string]string{ // // See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels // and https://prometheus.io/docs/practices/naming/#metric-and-label-naming -func BuildPromCompliantName(metric pmetric.Metric, namespace string) string { +func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string { + var metricName string + + // Full normalization following standard Prometheus naming conventions + if addMetricSuffixes && normalizeNameGate.IsEnabled() { + return normalizeName(metric, namespace) + } + + // Simple case (no full normalization, no units, etc.), we simply trim out forbidden chars + metricName = RemovePromForbiddenRunes(metric.Name()) + + // Namespace? + if namespace != "" { + return namespace + "_" + metricName + } + + // Metric name starts with a digit? 
Prefix it with an underscore + if metricName != "" && unicode.IsDigit(rune(metricName[0])) { + metricName = "_" + metricName + } + + return metricName +} + +// Build a normalized name for the specified metric +func normalizeName(metric pmetric.Metric, namespace string) string { + // Split metric name in "tokens" (remove all non-alphanumeric) nameTokens := strings.FieldsFunc( metric.Name(), @@ -202,7 +231,7 @@ func removeSuffix(tokens []string, suffix string) []string { return tokens } -// Clean up specified string so it's Prometheus compliant. +// Clean up specified string so it's Prometheus compliant func CleanUpString(s string) string { return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) }), "_") } @@ -211,8 +240,8 @@ func RemovePromForbiddenRunes(s string) string { return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != '_' && r != ':' }), "_") } -// Retrieve the Prometheus "basic" unit corresponding to the specified "basic" unit. -// Returns the specified unit if not found in unitMap. +// Retrieve the Prometheus "basic" unit corresponding to the specified "basic" unit +// Returns the specified unit if not found in unitMap func unitMapGetOrDefault(unit string) string { if promUnit, ok := unitMap[unit]; ok { return promUnit @@ -220,8 +249,8 @@ func unitMapGetOrDefault(unit string) string { return unit } -// Retrieve the Prometheus "per" unit corresponding to the specified "per" unit. -// Returns the specified unit if not found in perUnitMap. 
+// Retrieve the Prometheus "per" unit corresponding to the specified "per" unit +// Returns the specified unit if not found in perUnitMap func perUnitMapGetOrDefault(perUnit string) string { if promPerUnit, ok := perUnitMap[perUnit]; ok { return promPerUnit @@ -229,7 +258,7 @@ func perUnitMapGetOrDefault(perUnit string) string { return perUnit } -// Returns whether the slice contains the specified value. +// Returns whether the slice contains the specified value func contains(slice []string, value string) bool { for _, sliceEntry := range slice { if sliceEntry == value { @@ -239,7 +268,7 @@ func contains(slice []string, value string) bool { return false } -// Remove the specified value from the slice. +// Remove the specified value from the slice func removeItem(slice []string, value string) []string { newSlice := make([]string, 0, len(slice)) for _, sliceEntry := range slice { diff --git a/storage/remote/otlptranslator/prometheus/normalize_name_test.go b/storage/remote/otlptranslator/prometheus/normalize_name_test.go deleted file mode 100644 index 33910636a..000000000 --- a/storage/remote/otlptranslator/prometheus/normalize_name_test.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package normalize - -import ( - "testing" - - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/pdata/pmetric" -) - -func TestByte(t *testing.T) { - require.Equal(t, "system_filesystem_usage_bytes", BuildPromCompliantName(createGauge("system.filesystem.usage", "By"), "")) -} - -func TestByteCounter(t *testing.T) { - require.Equal(t, "system_io_bytes_total", BuildPromCompliantName(createCounter("system.io", "By"), "")) - require.Equal(t, "network_transmitted_bytes_total", BuildPromCompliantName(createCounter("network_transmitted_bytes_total", "By"), "")) -} - -func TestWhiteSpaces(t *testing.T) { - require.Equal(t, "system_filesystem_usage_bytes", BuildPromCompliantName(createGauge("\t 
system.filesystem.usage ", " By\t"), "")) -} - -func TestNonStandardUnit(t *testing.T) { - require.Equal(t, "system_network_dropped", BuildPromCompliantName(createGauge("system.network.dropped", "{packets}"), "")) -} - -func TestNonStandardUnitCounter(t *testing.T) { - require.Equal(t, "system_network_dropped_total", BuildPromCompliantName(createCounter("system.network.dropped", "{packets}"), "")) -} - -func TestBrokenUnit(t *testing.T) { - require.Equal(t, "system_network_dropped_packets", BuildPromCompliantName(createGauge("system.network.dropped", "packets"), "")) - require.Equal(t, "system_network_packets_dropped", BuildPromCompliantName(createGauge("system.network.packets.dropped", "packets"), "")) - require.Equal(t, "system_network_packets", BuildPromCompliantName(createGauge("system.network.packets", "packets"), "")) -} - -func TestBrokenUnitCounter(t *testing.T) { - require.Equal(t, "system_network_dropped_packets_total", BuildPromCompliantName(createCounter("system.network.dropped", "packets"), "")) - require.Equal(t, "system_network_packets_dropped_total", BuildPromCompliantName(createCounter("system.network.packets.dropped", "packets"), "")) - require.Equal(t, "system_network_packets_total", BuildPromCompliantName(createCounter("system.network.packets", "packets"), "")) -} - -func TestRatio(t *testing.T) { - require.Equal(t, "hw_gpu_memory_utilization_ratio", BuildPromCompliantName(createGauge("hw.gpu.memory.utilization", "1"), "")) - require.Equal(t, "hw_fan_speed_ratio", BuildPromCompliantName(createGauge("hw.fan.speed_ratio", "1"), "")) - require.Equal(t, "objects_total", BuildPromCompliantName(createCounter("objects", "1"), "")) -} - -func TestHertz(t *testing.T) { - require.Equal(t, "hw_cpu_speed_limit_hertz", BuildPromCompliantName(createGauge("hw.cpu.speed_limit", "Hz"), "")) -} - -func TestPer(t *testing.T) { - require.Equal(t, "broken_metric_speed_km_per_hour", BuildPromCompliantName(createGauge("broken.metric.speed", "km/h"), "")) - 
require.Equal(t, "astro_light_speed_limit_meters_per_second", BuildPromCompliantName(createGauge("astro.light.speed_limit", "m/s"), "")) -} - -func TestPercent(t *testing.T) { - require.Equal(t, "broken_metric_success_ratio_percent", BuildPromCompliantName(createGauge("broken.metric.success_ratio", "%"), "")) - require.Equal(t, "broken_metric_success_percent", BuildPromCompliantName(createGauge("broken.metric.success_percent", "%"), "")) -} - -func TestDollar(t *testing.T) { - require.Equal(t, "crypto_bitcoin_value_dollars", BuildPromCompliantName(createGauge("crypto.bitcoin.value", "$"), "")) - require.Equal(t, "crypto_bitcoin_value_dollars", BuildPromCompliantName(createGauge("crypto.bitcoin.value.dollars", "$"), "")) -} - -func TestEmpty(t *testing.T) { - require.Equal(t, "test_metric_no_unit", BuildPromCompliantName(createGauge("test.metric.no_unit", ""), "")) - require.Equal(t, "test_metric_spaces", BuildPromCompliantName(createGauge("test.metric.spaces", " \t "), "")) -} - -func TestUnsupportedRunes(t *testing.T) { - require.Equal(t, "unsupported_metric_temperature_F", BuildPromCompliantName(createGauge("unsupported.metric.temperature", "°F"), "")) - require.Equal(t, "unsupported_metric_weird", BuildPromCompliantName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), "")) - require.Equal(t, "unsupported_metric_redundant_test_per_C", BuildPromCompliantName(createGauge("unsupported.metric.redundant", "__test $/°C"), "")) -} - -func TestOtelReceivers(t *testing.T) { - require.Equal(t, "active_directory_ds_replication_network_io_bytes_total", BuildPromCompliantName(createCounter("active_directory.ds.replication.network.io", "By"), "")) - require.Equal(t, "active_directory_ds_replication_sync_object_pending_total", BuildPromCompliantName(createCounter("active_directory.ds.replication.sync.object.pending", "{objects}"), "")) - require.Equal(t, "active_directory_ds_replication_object_rate_per_second", 
BuildPromCompliantName(createGauge("active_directory.ds.replication.object.rate", "{objects}/s"), "")) - require.Equal(t, "active_directory_ds_name_cache_hit_rate_percent", BuildPromCompliantName(createGauge("active_directory.ds.name_cache.hit_rate", "%"), "")) - require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time_milliseconds", BuildPromCompliantName(createGauge("active_directory.ds.ldap.bind.last_successful.time", "ms"), "")) - require.Equal(t, "apache_current_connections", BuildPromCompliantName(createGauge("apache.current_connections", "connections"), "")) - require.Equal(t, "apache_workers_connections", BuildPromCompliantName(createGauge("apache.workers", "connections"), "")) - require.Equal(t, "apache_requests_total", BuildPromCompliantName(createCounter("apache.requests", "1"), "")) - require.Equal(t, "bigip_virtual_server_request_count_total", BuildPromCompliantName(createCounter("bigip.virtual_server.request.count", "{requests}"), "")) - require.Equal(t, "system_cpu_utilization_ratio", BuildPromCompliantName(createGauge("system.cpu.utilization", "1"), "")) - require.Equal(t, "system_disk_operation_time_seconds_total", BuildPromCompliantName(createCounter("system.disk.operation_time", "s"), "")) - require.Equal(t, "system_cpu_load_average_15m_ratio", BuildPromCompliantName(createGauge("system.cpu.load_average.15m", "1"), "")) - require.Equal(t, "memcached_operation_hit_ratio_percent", BuildPromCompliantName(createGauge("memcached.operation_hit_ratio", "%"), "")) - require.Equal(t, "mongodbatlas_process_asserts_per_second", BuildPromCompliantName(createGauge("mongodbatlas.process.asserts", "{assertions}/s"), "")) - require.Equal(t, "mongodbatlas_process_journaling_data_files_mebibytes", BuildPromCompliantName(createGauge("mongodbatlas.process.journaling.data_files", "MiBy"), "")) - require.Equal(t, "mongodbatlas_process_network_io_bytes_per_second", BuildPromCompliantName(createGauge("mongodbatlas.process.network.io", "By/s"), "")) - 
require.Equal(t, "mongodbatlas_process_oplog_rate_gibibytes_per_hour", BuildPromCompliantName(createGauge("mongodbatlas.process.oplog.rate", "GiBy/h"), "")) - require.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", BuildPromCompliantName(createGauge("mongodbatlas.process.db.query_targeting.scanned_per_returned", "{scanned}/{returned}"), "")) - require.Equal(t, "nginx_requests", BuildPromCompliantName(createGauge("nginx.requests", "requests"), "")) - require.Equal(t, "nginx_connections_accepted", BuildPromCompliantName(createGauge("nginx.connections_accepted", "connections"), "")) - require.Equal(t, "nsxt_node_memory_usage_kilobytes", BuildPromCompliantName(createGauge("nsxt.node.memory.usage", "KBy"), "")) - require.Equal(t, "redis_latest_fork_microseconds", BuildPromCompliantName(createGauge("redis.latest_fork", "us"), "")) -} - -func TestTrimPromSuffixes(t *testing.T) { - require.Equal(t, "active_directory_ds_replication_network_io", TrimPromSuffixes("active_directory_ds_replication_network_io_bytes_total", pmetric.MetricTypeSum, "bytes")) - require.Equal(t, "active_directory_ds_name_cache_hit_rate", TrimPromSuffixes("active_directory_ds_name_cache_hit_rate_percent", pmetric.MetricTypeGauge, "percent")) - require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time", TrimPromSuffixes("active_directory_ds_ldap_bind_last_successful_time_milliseconds", pmetric.MetricTypeGauge, "milliseconds")) - require.Equal(t, "apache_requests", TrimPromSuffixes("apache_requests_total", pmetric.MetricTypeSum, "1")) - require.Equal(t, "system_cpu_utilization", TrimPromSuffixes("system_cpu_utilization_ratio", pmetric.MetricTypeGauge, "ratio")) - require.Equal(t, "mongodbatlas_process_journaling_data_files", TrimPromSuffixes("mongodbatlas_process_journaling_data_files_mebibytes", pmetric.MetricTypeGauge, "mebibytes")) - require.Equal(t, "mongodbatlas_process_network_io", TrimPromSuffixes("mongodbatlas_process_network_io_bytes_per_second", 
pmetric.MetricTypeGauge, "bytes_per_second")) - require.Equal(t, "mongodbatlas_process_oplog_rate", TrimPromSuffixes("mongodbatlas_process_oplog_rate_gibibytes_per_hour", pmetric.MetricTypeGauge, "gibibytes_per_hour")) - require.Equal(t, "nsxt_node_memory_usage", TrimPromSuffixes("nsxt_node_memory_usage_kilobytes", pmetric.MetricTypeGauge, "kilobytes")) - require.Equal(t, "redis_latest_fork", TrimPromSuffixes("redis_latest_fork_microseconds", pmetric.MetricTypeGauge, "microseconds")) - require.Equal(t, "up", TrimPromSuffixes("up", pmetric.MetricTypeGauge, "")) - - // These are not necessarily valid OM units, only tested for the sake of completeness. - require.Equal(t, "active_directory_ds_replication_sync_object_pending", TrimPromSuffixes("active_directory_ds_replication_sync_object_pending_total", pmetric.MetricTypeSum, "{objects}")) - require.Equal(t, "apache_current", TrimPromSuffixes("apache_current_connections", pmetric.MetricTypeGauge, "connections")) - require.Equal(t, "bigip_virtual_server_request_count", TrimPromSuffixes("bigip_virtual_server_request_count_total", pmetric.MetricTypeSum, "{requests}")) - require.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", TrimPromSuffixes("mongodbatlas_process_db_query_targeting_scanned_per_returned", pmetric.MetricTypeGauge, "{scanned}/{returned}")) - require.Equal(t, "nginx_connections_accepted", TrimPromSuffixes("nginx_connections_accepted", pmetric.MetricTypeGauge, "connections")) - require.Equal(t, "apache_workers", TrimPromSuffixes("apache_workers_connections", pmetric.MetricTypeGauge, "connections")) - require.Equal(t, "nginx", TrimPromSuffixes("nginx_requests", pmetric.MetricTypeGauge, "requests")) - - // Units shouldn't be trimmed if the unit is not a direct match with the suffix, i.e, a suffix "_seconds" shouldn't be removed if unit is "sec" or "s" - require.Equal(t, "system_cpu_load_average_15m_ratio", TrimPromSuffixes("system_cpu_load_average_15m_ratio", pmetric.MetricTypeGauge, 
"1")) - require.Equal(t, "mongodbatlas_process_asserts_per_second", TrimPromSuffixes("mongodbatlas_process_asserts_per_second", pmetric.MetricTypeGauge, "{assertions}/s")) - require.Equal(t, "memcached_operation_hit_ratio_percent", TrimPromSuffixes("memcached_operation_hit_ratio_percent", pmetric.MetricTypeGauge, "%")) - require.Equal(t, "active_directory_ds_replication_object_rate_per_second", TrimPromSuffixes("active_directory_ds_replication_object_rate_per_second", pmetric.MetricTypeGauge, "{objects}/s")) - require.Equal(t, "system_disk_operation_time_seconds", TrimPromSuffixes("system_disk_operation_time_seconds_total", pmetric.MetricTypeSum, "s")) -} - -func TestNamespace(t *testing.T) { - require.Equal(t, "space_test", BuildPromCompliantName(createGauge("test", ""), "space")) - require.Equal(t, "space_test", BuildPromCompliantName(createGauge("#test", ""), "space")) -} - -func TestCleanUpString(t *testing.T) { - require.Equal(t, "", CleanUpString("")) - require.Equal(t, "a_b", CleanUpString("a b")) - require.Equal(t, "hello_world", CleanUpString("hello, world!")) - require.Equal(t, "hello_you_2", CleanUpString("hello you 2")) - require.Equal(t, "1000", CleanUpString("$1000")) - require.Equal(t, "", CleanUpString("*+$^=)")) -} - -func TestUnitMapGetOrDefault(t *testing.T) { - require.Equal(t, "", unitMapGetOrDefault("")) - require.Equal(t, "seconds", unitMapGetOrDefault("s")) - require.Equal(t, "invalid", unitMapGetOrDefault("invalid")) -} - -func TestPerUnitMapGetOrDefault(t *testing.T) { - require.Equal(t, "", perUnitMapGetOrDefault("")) - require.Equal(t, "second", perUnitMapGetOrDefault("s")) - require.Equal(t, "invalid", perUnitMapGetOrDefault("invalid")) -} - -func TestRemoveItem(t *testing.T) { - require.Equal(t, []string{}, removeItem([]string{}, "test")) - require.Equal(t, []string{}, removeItem([]string{}, "")) - require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, "d")) - require.Equal(t, []string{"a", "b", "c"}, 
removeItem([]string{"a", "b", "c"}, "")) - require.Equal(t, []string{"a", "b"}, removeItem([]string{"a", "b", "c"}, "c")) - require.Equal(t, []string{"a", "c"}, removeItem([]string{"a", "b", "c"}, "b")) - require.Equal(t, []string{"b", "c"}, removeItem([]string{"a", "b", "c"}, "a")) -} - -func TestBuildPromCompliantName(t *testing.T) { - require.Equal(t, "system_io_bytes_total", BuildPromCompliantName(createCounter("system.io", "By"), "")) - require.Equal(t, "system_network_io_bytes_total", BuildPromCompliantName(createCounter("network.io", "By"), "system")) - require.Equal(t, "_3_14_digits", BuildPromCompliantName(createGauge("3.14 digits", ""), "")) - require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildPromCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "")) - require.Equal(t, "foo_bar", BuildPromCompliantName(createGauge(":foo::bar", ""), "")) - require.Equal(t, "foo_bar_total", BuildPromCompliantName(createCounter(":foo::bar", ""), "")) -} diff --git a/storage/remote/otlptranslator/prometheus/testutils_test.go b/storage/remote/otlptranslator/prometheus/testutils_test.go deleted file mode 100644 index dc4983bf5..000000000 --- a/storage/remote/otlptranslator/prometheus/testutils_test.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package normalize - -import ( - "go.opentelemetry.io/collector/pdata/pmetric" -) - -var ilm pmetric.ScopeMetrics - -func init() { - metrics := pmetric.NewMetrics() - resourceMetrics := metrics.ResourceMetrics().AppendEmpty() - ilm = resourceMetrics.ScopeMetrics().AppendEmpty() -} - -// Returns a new Metric of type "Gauge" with specified name and unit. -func createGauge(name, unit string) pmetric.Metric { - gauge := ilm.Metrics().AppendEmpty() - gauge.SetName(name) - gauge.SetUnit(unit) - gauge.SetEmptyGauge() - return gauge -} - -// Returns a new Metric of type Monotonic Sum with specified name and unit. 
-func createCounter(name, unit string) pmetric.Metric { - counter := ilm.Metrics().AppendEmpty() - counter.SetEmptySum().SetIsMonotonic(true) - counter.SetName(name) - counter.SetUnit(unit) - return counter -} diff --git a/storage/remote/otlptranslator/prometheus/unit_to_ucum.go b/storage/remote/otlptranslator/prometheus/unit_to_ucum.go new file mode 100644 index 000000000..b2f2c4f3a --- /dev/null +++ b/storage/remote/otlptranslator/prometheus/unit_to_ucum.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package prometheus // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus" + +import "strings" + +var wordToUCUM = map[string]string{ + + // Time + "days": "d", + "hours": "h", + "minutes": "min", + "seconds": "s", + "milliseconds": "ms", + "microseconds": "us", + "nanoseconds": "ns", + + // Bytes + "bytes": "By", + "kibibytes": "KiBy", + "mebibytes": "MiBy", + "gibibytes": "GiBy", + "tibibytes": "TiBy", + "kilobytes": "KBy", + "megabytes": "MBy", + "gigabytes": "GBy", + "terabytes": "TBy", + + // SI + "meters": "m", + "volts": "V", + "amperes": "A", + "joules": "J", + "watts": "W", + "grams": "g", + + // Misc + "celsius": "Cel", + "hertz": "Hz", + "ratio": "1", + "percent": "%", +} + +// The map that translates the "per" unit +// Example: per_second (singular) => /s +var perWordToUCUM = map[string]string{ + "second": "s", + "minute": "m", + "hour": "h", + "day": "d", + "week": "w", + "month": "mo", + "year": "y", +} + +// UnitWordToUCUM converts english unit words to UCUM units: +// https://ucum.org/ucum#section-Alphabetic-Index-By-Symbol +// It also handles rates, such as meters_per_second, by translating the first +// word to UCUM, and the "per" word to UCUM. It joins them with a "/" between. 
+func UnitWordToUCUM(unit string) string { + unitTokens := strings.SplitN(unit, "_per_", 2) + if len(unitTokens) == 0 { + return "" + } + ucumUnit := wordToUCUMOrDefault(unitTokens[0]) + if len(unitTokens) > 1 && unitTokens[1] != "" { + ucumUnit += "/" + perWordToUCUMOrDefault(unitTokens[1]) + } + return ucumUnit +} + +// wordToUCUMOrDefault retrieves the Prometheus "basic" unit corresponding to +// the specified "basic" unit. Returns the specified unit if not found in +// wordToUCUM. +func wordToUCUMOrDefault(unit string) string { + if promUnit, ok := wordToUCUM[unit]; ok { + return promUnit + } + return unit +} + +// perWordToUCUMOrDefault retrieve the Prometheus "per" unit corresponding to +// the specified "per" unit. Returns the specified unit if not found in perWordToUCUM. +func perWordToUCUMOrDefault(perUnit string) string { + if promPerUnit, ok := perWordToUCUM[perUnit]; ok { + return promPerUnit + } + return perUnit +} diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go index 6080686e7..49ad5672b 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -71,8 +71,8 @@ func (a ByLabelName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // creates a new TimeSeries in the map if not found and returns the time series signature. // tsMap will be unmodified if either labels or sample is nil, but can still be modified if the exemplar is nil. func addSample(tsMap map[string]*prompb.TimeSeries, sample *prompb.Sample, labels []prompb.Label, - datatype string, -) string { + datatype string) string { + if sample == nil || labels == nil || tsMap == nil { return "" } @@ -132,7 +132,14 @@ func addExemplar(tsMap map[string]*prompb.TimeSeries, bucketBounds []bucketBound // the label slice should not contain duplicate label names; this method sorts the slice by label name before creating // the signature. 
func timeSeriesSignature(datatype string, labels *[]prompb.Label) string { + length := len(datatype) + + for _, lb := range *labels { + length += 2 + len(lb.GetName()) + len(lb.GetValue()) + } + b := strings.Builder{} + b.Grow(length) b.WriteString(datatype) sort.Sort(ByLabelName(*labels)) @@ -151,8 +158,22 @@ func timeSeriesSignature(datatype string, labels *[]prompb.Label) string { // Unpaired string value is ignored. String pairs overwrites OTLP labels if collision happens, and the overwrite is // logged. Resultant label names are sanitized. func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externalLabels map[string]string, extras ...string) []prompb.Label { + serviceName, haveServiceName := resource.Attributes().Get(conventions.AttributeServiceName) + instance, haveInstanceID := resource.Attributes().Get(conventions.AttributeServiceInstanceID) + + // Calculate the maximum possible number of labels we could return so we can preallocate l + maxLabelCount := attributes.Len() + len(externalLabels) + len(extras)/2 + + if haveServiceName { + maxLabelCount++ + } + + if haveInstanceID { + maxLabelCount++ + } + // map ensures no duplicate label name - l := map[string]prompb.Label{} + l := make(map[string]string, maxLabelCount) // Ensure attributes are sorted by key for consistent merging of keys which // collide when sanitized. 
@@ -164,35 +185,25 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa sort.Stable(ByLabelName(labels)) for _, label := range labels { - finalKey := prometheustranslator.NormalizeLabel(label.Name) + var finalKey = prometheustranslator.NormalizeLabel(label.Name) if existingLabel, alreadyExists := l[finalKey]; alreadyExists { - existingLabel.Value = existingLabel.Value + ";" + label.Value - l[finalKey] = existingLabel + l[finalKey] = existingLabel + ";" + label.Value } else { - l[finalKey] = prompb.Label{ - Name: finalKey, - Value: label.Value, - } + l[finalKey] = label.Value } } // Map service.name + service.namespace to job - if serviceName, ok := resource.Attributes().Get(conventions.AttributeServiceName); ok { + if haveServiceName { val := serviceName.AsString() if serviceNamespace, ok := resource.Attributes().Get(conventions.AttributeServiceNamespace); ok { val = fmt.Sprintf("%s/%s", serviceNamespace.AsString(), val) } - l[model.JobLabel] = prompb.Label{ - Name: model.JobLabel, - Value: val, - } + l[model.JobLabel] = val } // Map service.instance.id to instance - if instance, ok := resource.Attributes().Get(conventions.AttributeServiceInstanceID); ok { - l[model.InstanceLabel] = prompb.Label{ - Name: model.InstanceLabel, - Value: instance.AsString(), - } + if haveInstanceID { + l[model.InstanceLabel] = instance.AsString() } for key, value := range externalLabels { // External labels have already been sanitized @@ -200,10 +211,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa // Skip external labels if they are overridden by metric attributes continue } - l[key] = prompb.Label{ - Name: key, - Value: value, - } + l[key] = value } for i := 0; i < len(extras); i += 2 { @@ -219,15 +227,12 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa if !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") { name = prometheustranslator.NormalizeLabel(name) } - 
l[name] = prompb.Label{ - Name: name, - Value: extras[i+1], - } + l[name] = extras[i+1] } s := make([]prompb.Label, 0, len(l)) - for _, lb := range l { - s = append(s, lb) + for k, v := range l { + s = append(s, prompb.Label{Name: k, Value: v}) } return s @@ -236,6 +241,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa // isValidAggregationTemporality checks whether an OTel metric has a valid // aggregation temporality for conversion to a Prometheus metric. func isValidAggregationTemporality(metric pmetric.Metric) bool { + //exhaustive:enforce switch metric.Type() { case pmetric.MetricTypeGauge, pmetric.MetricTypeSummary: return true @@ -254,7 +260,22 @@ func isValidAggregationTemporality(metric pmetric.Metric) bool { func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings, tsMap map[string]*prompb.TimeSeries) { timestamp := convertTimeStamp(pt.Timestamp()) // sum, count, and buckets of the histogram should append suffix to baseName - baseName := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace) + baseName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes) + baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels) + + createLabels := func(nameSuffix string, extras ...string) []prompb.Label { + extraLabelCount := len(extras) / 2 + labels := make([]prompb.Label, len(baseLabels), len(baseLabels)+extraLabelCount+1) // +1 for name + copy(labels, baseLabels) + + for extrasIdx := 0; extrasIdx < extraLabelCount; extrasIdx++ { + labels = append(labels, prompb.Label{Name: extras[extrasIdx], Value: extras[extrasIdx+1]}) + } + + labels = append(labels, prompb.Label{Name: nameStr, Value: baseName + nameSuffix}) + + return labels + } // If the sum is unset, it indicates the _sum metric point should be // omitted @@ -268,7 +289,7 @@ func addSingleHistogramDataPoint(pt 
pmetric.HistogramDataPoint, resource pcommon sum.Value = math.Float64frombits(value.StaleNaN) } - sumlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+sumStr) + sumlabels := createLabels(sumStr) addSample(tsMap, sum, sumlabels, metric.Type().String()) } @@ -282,7 +303,7 @@ func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon count.Value = math.Float64frombits(value.StaleNaN) } - countlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+countStr) + countlabels := createLabels(countStr) addSample(tsMap, count, countlabels, metric.Type().String()) // cumulative count for conversion to cumulative histogram @@ -304,7 +325,7 @@ func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon bucket.Value = math.Float64frombits(value.StaleNaN) } boundStr := strconv.FormatFloat(bound, 'f', -1, 64) - labels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+bucketStr, leStr, boundStr) + labels := createLabels(bucketStr, leStr, boundStr) sig := addSample(tsMap, bucket, labels, metric.Type().String()) bucketBounds = append(bucketBounds, bucketBoundsData{sig: sig, bound: bound}) @@ -318,7 +339,7 @@ func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon } else { infBucket.Value = float64(pt.Count()) } - infLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+bucketStr, leStr, pInfStr) + infLabels := createLabels(bucketStr, leStr, pInfStr) sig := addSample(tsMap, infBucket, infLabels, metric.Type().String()) bucketBounds = append(bucketBounds, bucketBoundsData{sig: sig, bound: math.Inf(1)}) @@ -327,14 +348,8 @@ func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon // add _created time series if needed startTimestamp := pt.StartTimestamp() if settings.ExportCreatedMetric && startTimestamp != 0 { - createdLabels := 
createAttributes( - resource, - pt.Attributes(), - settings.ExternalLabels, - nameStr, - baseName+createdSuffix, - ) - addCreatedTimeSeriesIfNeeded(tsMap, createdLabels, startTimestamp, metric.Type().String()) + labels := createLabels(createdSuffix) + addCreatedTimeSeriesIfNeeded(tsMap, labels, startTimestamp, metric.Type().String()) } } @@ -402,6 +417,7 @@ func getPromExemplars[T exemplarType](pt T) []prompb.Exemplar { func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp { var ts pcommon.Timestamp // handle individual metric based on type + //exhaustive:enforce switch metric.Type() { case pmetric.MetricTypeGauge: dataPoints := metric.Gauge().DataPoints() @@ -441,11 +457,26 @@ func maxTimestamp(a, b pcommon.Timestamp) pcommon.Timestamp { // addSingleSummaryDataPoint converts pt to len(QuantileValues) + 2 samples. func addSingleSummaryDataPoint(pt pmetric.SummaryDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings, - tsMap map[string]*prompb.TimeSeries, -) { + tsMap map[string]*prompb.TimeSeries) { timestamp := convertTimeStamp(pt.Timestamp()) // sum and count of the summary should append suffix to baseName - baseName := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace) + baseName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes) + baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels) + + createLabels := func(name string, extras ...string) []prompb.Label { + extraLabelCount := len(extras) / 2 + labels := make([]prompb.Label, len(baseLabels), len(baseLabels)+extraLabelCount+1) // +1 for name + copy(labels, baseLabels) + + for extrasIdx := 0; extrasIdx < extraLabelCount; extrasIdx++ { + labels = append(labels, prompb.Label{Name: extras[extrasIdx], Value: extras[extrasIdx+1]}) + } + + labels = append(labels, prompb.Label{Name: nameStr, Value: name}) + + return labels + } + // treat sum as a sample in an individual TimeSeries 
sum := &prompb.Sample{ Value: pt.Sum(), @@ -454,7 +485,7 @@ func addSingleSummaryDataPoint(pt pmetric.SummaryDataPoint, resource pcommon.Res if pt.Flags().NoRecordedValue() { sum.Value = math.Float64frombits(value.StaleNaN) } - sumlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+sumStr) + sumlabels := createLabels(baseName + sumStr) addSample(tsMap, sum, sumlabels, metric.Type().String()) // treat count as a sample in an individual TimeSeries @@ -465,7 +496,7 @@ func addSingleSummaryDataPoint(pt pmetric.SummaryDataPoint, resource pcommon.Res if pt.Flags().NoRecordedValue() { count.Value = math.Float64frombits(value.StaleNaN) } - countlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+countStr) + countlabels := createLabels(baseName + countStr) addSample(tsMap, count, countlabels, metric.Type().String()) // process each percentile/quantile @@ -479,20 +510,14 @@ func addSingleSummaryDataPoint(pt pmetric.SummaryDataPoint, resource pcommon.Res quantile.Value = math.Float64frombits(value.StaleNaN) } percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64) - qtlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName, quantileStr, percentileStr) + qtlabels := createLabels(baseName, quantileStr, percentileStr) addSample(tsMap, quantile, qtlabels, metric.Type().String()) } // add _created time series if needed startTimestamp := pt.StartTimestamp() if settings.ExportCreatedMetric && startTimestamp != 0 { - createdLabels := createAttributes( - resource, - pt.Attributes(), - settings.ExternalLabels, - nameStr, - baseName+createdSuffix, - ) + createdLabels := createLabels(baseName + createdSuffix) addCreatedTimeSeriesIfNeeded(tsMap, createdLabels, startTimestamp, metric.Type().String()) } } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go 
index 9a4ec6e11..3c7494a6b 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go @@ -60,15 +60,20 @@ func addSingleExponentialHistogramDataPoint( // to Prometheus Native Histogram. func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prompb.Histogram, error) { scale := p.Scale() - if scale < -4 || scale > 8 { + if scale < -4 { return prompb.Histogram{}, fmt.Errorf("cannot convert exponential to native histogram."+ - " Scale must be <= 8 and >= -4, was %d", scale) - // TODO: downscale to 8 if scale > 8 + " Scale must be >= -4, was %d", scale) } - pSpans, pDeltas := convertBucketsLayout(p.Positive()) - nSpans, nDeltas := convertBucketsLayout(p.Negative()) + var scaleDown int32 + if scale > 8 { + scaleDown = scale - 8 + scale = 8 + } + + pSpans, pDeltas := convertBucketsLayout(p.Positive(), scaleDown) + nSpans, nDeltas := convertBucketsLayout(p.Negative(), scaleDown) h := prompb.Histogram{ Schema: scale, @@ -106,17 +111,19 @@ func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prom // The bucket indexes conversion was adjusted, since OTel exp. histogram bucket // index 0 corresponds to the range (1, base] while Prometheus bucket index 0 // to the range (base 1]. -func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets) ([]prompb.BucketSpan, []int64) { +// +// scaleDown is the factor by which the buckets are scaled down. In other words 2^scaleDown buckets will be merged into one. 
+func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets, scaleDown int32) ([]prompb.BucketSpan, []int64) { bucketCounts := buckets.BucketCounts() if bucketCounts.Len() == 0 { return nil, nil } var ( - spans []prompb.BucketSpan - deltas []int64 - prevCount int64 - nextBucketIdx int32 + spans []prompb.BucketSpan + deltas []int64 + count int64 + prevCount int64 ) appendDelta := func(count int64) { @@ -125,34 +132,67 @@ func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets) prevCount = count } - for i := 0; i < bucketCounts.Len(); i++ { - count := int64(bucketCounts.At(i)) + // Let the compiler figure out that this is const during this function by + // moving it into a local variable. + numBuckets := bucketCounts.Len() + + // The offset is scaled and adjusted by 1 as described above. + bucketIdx := buckets.Offset()>>scaleDown + 1 + spans = append(spans, prompb.BucketSpan{ + Offset: bucketIdx, + Length: 0, + }) + + for i := 0; i < numBuckets; i++ { + // The offset is scaled and adjusted by 1 as described above. + nextBucketIdx := (int32(i)+buckets.Offset())>>scaleDown + 1 + if bucketIdx == nextBucketIdx { // We have not collected enough buckets to merge yet. + count += int64(bucketCounts.At(i)) + continue + } if count == 0 { + count = int64(bucketCounts.At(i)) continue } - // The offset is adjusted by 1 as described above. - bucketIdx := int32(i) + buckets.Offset() + 1 - delta := bucketIdx - nextBucketIdx - if i == 0 || delta > 2 { - // We have to create a new span, either because we are - // at the very beginning, or because we have found a gap + gap := nextBucketIdx - bucketIdx - 1 + if gap > 2 { + // We have to create a new span, because we have found a gap // of more than two buckets. 
The constant 2 is copied from the logic in // https://github.com/prometheus/client_golang/blob/27f0506d6ebbb117b6b697d0552ee5be2502c5f2/prometheus/histogram.go#L1296 spans = append(spans, prompb.BucketSpan{ - Offset: delta, + Offset: gap, Length: 0, }) } else { // We have found a small gap (or no gap at all). // Insert empty buckets as needed. - for j := int32(0); j < delta; j++ { + for j := int32(0); j < gap; j++ { appendDelta(0) } } appendDelta(count) - nextBucketIdx = bucketIdx + 1 + count = int64(bucketCounts.At(i)) + bucketIdx = nextBucketIdx } + // Need to use the last item's index. The offset is scaled and adjusted by 1 as described above. + gap := (int32(numBuckets)+buckets.Offset()-1)>>scaleDown + 1 - bucketIdx + if gap > 2 { + // We have to create a new span, because we have found a gap + // of more than two buckets. The constant 2 is copied from the logic in + // https://github.com/prometheus/client_golang/blob/27f0506d6ebbb117b6b697d0552ee5be2502c5f2/prometheus/histogram.go#L1296 + spans = append(spans, prompb.BucketSpan{ + Offset: gap, + Length: 0, + }) + } else { + // We have found a small gap (or no gap at all). + // Insert empty buckets as needed. + for j := int32(0); j < gap; j++ { + appendDelta(0) + } + } + appendDelta(count) return spans, deltas } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 34ee762dd..6a5a65604 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -22,6 +22,7 @@ type Settings struct { ExternalLabels map[string]string DisableTargetInfo bool ExportCreatedMetric bool + AddMetricSuffixes bool } // FromMetrics converts pmetric.Metrics to prometheus remote write format. 
@@ -51,6 +52,7 @@ func FromMetrics(md pmetric.Metrics, settings Settings) (tsMap map[string]*promp } // handle individual metric based on type + //exhaustive:enforce switch metric.Type() { case pmetric.MetricTypeGauge: dataPoints := metric.Gauge().DataPoints() @@ -81,7 +83,7 @@ func FromMetrics(md pmetric.Metrics, settings Settings) (tsMap map[string]*promp if dataPoints.Len() == 0 { errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) } - name := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace) + name := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes) for x := 0; x < dataPoints.Len(); x++ { errs = multierr.Append( errs, diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go index 3a5d201dd..c8e59694b 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go @@ -27,7 +27,7 @@ func addSingleGaugeNumberDataPoint( settings Settings, series map[string]*prompb.TimeSeries, ) { - name := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace) + name := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes) labels := createAttributes( resource, pt.Attributes(), @@ -60,7 +60,7 @@ func addSingleSumNumberDataPoint( settings Settings, series map[string]*prompb.TimeSeries, ) { - name := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace) + name := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes) labels := createAttributes( resource, pt.Attributes(), diff --git a/storage/remote/otlptranslator/update-copy.sh b/storage/remote/otlptranslator/update-copy.sh index 13a2a7a2e..36ad0cc35 100755 --- a/storage/remote/otlptranslator/update-copy.sh 
+++ b/storage/remote/otlptranslator/update-copy.sh @@ -1,6 +1,6 @@ #!/bin/bash -OTEL_VERSION=v0.81.0 +OTEL_VERSION=v0.88.0 git clone https://github.com/open-telemetry/opentelemetry-collector-contrib ./tmp cd ./tmp @@ -8,7 +8,8 @@ git checkout $OTEL_VERSION cd .. rm -rf ./prometheusremotewrite/* cp -r ./tmp/pkg/translator/prometheusremotewrite/*.go ./prometheusremotewrite -rm -rf ./prometheusremotewrite/*_test.go +cp -r ./tmp/pkg/translator/prometheus/*.go ./prometheus +rm -rf ./prometheus/*_test.go rm -rf ./tmp sed -i '' 's#github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus#github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus#g' ./prometheusremotewrite/*.go From 4710679fbe8fd8e6e40c85a59ee0d86befb39c57 Mon Sep 17 00:00:00 2001 From: Goutham Date: Wed, 15 Nov 2023 15:30:09 +0100 Subject: [PATCH 588/632] Skip golanglint-ci on copied folders Signed-off-by: Goutham --- .golangci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.golangci.yml b/.golangci.yml index 666d22cbe..4df572c19 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -6,6 +6,7 @@ run: skip-dirs: # Copied it from a different source - storage/remote/otlptranslator/prometheusremotewrite + - storage/remote/otlptranslator/prometheus output: sort-results: true From 3048a88ae76c5d42d88726a4ba5ffe95964541e6 Mon Sep 17 00:00:00 2001 From: Goutham Date: Wed, 15 Nov 2023 15:52:18 +0100 Subject: [PATCH 589/632] Add suffixes Older version already did that. 
This upgrade needed manual opt-in Signed-off-by: Goutham --- storage/remote/write_handler.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index a0dd3940e..9891c6aae 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -207,7 +207,9 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } - prwMetricsMap, errs := otlptranslator.FromMetrics(req.Metrics(), otlptranslator.Settings{}) + prwMetricsMap, errs := otlptranslator.FromMetrics(req.Metrics(), otlptranslator.Settings{ + AddMetricSuffixes: true, + }) if errs != nil { level.Warn(h.logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", errs) } From b53254a81881d5439429a0261d0827dbf9cc1d9d Mon Sep 17 00:00:00 2001 From: Kemal Akkoyun Date: Wed, 15 Nov 2023 18:10:39 +0100 Subject: [PATCH 590/632] Upgrade golang.org/x packages Signed-off-by: Kemal Akkoyun --- go.mod | 18 +++++++++--------- go.sum | 31 ++++++++++++++++++------------- 2 files changed, 27 insertions(+), 22 deletions(-) diff --git a/go.mod b/go.mod index 44cf7c4b2..caca71f81 100644 --- a/go.mod +++ b/go.mod @@ -68,13 +68,13 @@ require ( go.uber.org/automaxprocs v1.5.3 go.uber.org/goleak v1.2.1 go.uber.org/multierr v1.11.0 - golang.org/x/exp v0.0.0-20231006140011-7918f672742d - golang.org/x/net v0.17.0 + golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa + golang.org/x/net v0.18.0 golang.org/x/oauth2 v0.13.0 - golang.org/x/sync v0.4.0 - golang.org/x/sys v0.13.0 + golang.org/x/sync v0.5.0 + golang.org/x/sys v0.14.0 golang.org/x/time v0.3.0 - golang.org/x/tools v0.14.0 + golang.org/x/tools v0.15.0 google.golang.org/api v0.147.0 google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a google.golang.org/grpc v1.59.0 @@ -178,10 +178,10 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/otel/metric v1.19.0 // indirect 
go.opentelemetry.io/proto/otlp v1.0.0 // indirect - golang.org/x/crypto v0.14.0 // indirect - golang.org/x/mod v0.13.0 // indirect - golang.org/x/term v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect + golang.org/x/crypto v0.15.0 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/term v0.14.0 // indirect + golang.org/x/text v0.14.0 // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index c4658fbdb..e6436825d 100644 --- a/go.sum +++ b/go.sum @@ -811,8 +811,9 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= +golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -823,8 +824,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp 
v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -847,8 +848,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -894,8 +895,9 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.1.0/go.mod 
h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= +golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -918,8 +920,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= -golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -988,15 +990,17 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8= +golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1009,8 +1013,9 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.13.0 
h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1071,8 +1076,8 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8= +golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 41c725333347108fe77ca34e53978e85b602d090 Mon Sep 17 00:00:00 2001 From: Levi Harrison Date: Wed, 15 Nov 2023 15:15:57 -0500 Subject: [PATCH 591/632] Release 2.48.0 Signed-off-by: Levi Harrison --- CHANGELOG.md | 17 +++++------------ VERSION | 2 +- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- 
web/ui/package-lock.json | 18 +++++++++--------- web/ui/package.json | 2 +- web/ui/react-app/package.json | 4 ++-- 7 files changed, 21 insertions(+), 28 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 35f9f7768..7bab5107c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,21 +1,11 @@ # Changelog -## 2.48.0-rc.2 / 2023-11-02 - -* [ENHANCEMENT] Scraping: Add configuration option for tracking staleness of scraped timestamps. #13060 -* [BUGFIX] Storage: Fix crash caused by incorrect mixed samples handling. #13055 -* [BUGFIX] TSDB: Fix compactor failures by adding min time to histogram chunks. #13062 - -## 2.48.0-rc.1 / 2023-10-24 - -* [BUGFIX] PromQL: Reduce inefficiency introduced by warnings/annotations and temporarily remove possible non-counter warnings. #13012 - -## 2.48.0-rc.0 / 2023-10-10 +## 2.48.0 / 2023-11-16 * [CHANGE] Remote-write: respect Retry-After header on 5xx errors. #12677 * [FEATURE] Alerting: Add AWS SigV4 authentication support for Alertmanager endpoints. #12774 * [FEATURE] Promtool: Add support for histograms in the TSDB dump command. #12775 -* [FEATURE] PromQL: Add warnings (and annotations) to PromQL query results. #12152 #12982 #12988 +* [FEATURE] PromQL: Add warnings (and annotations) to PromQL query results. #12152 #12982 #12988 #13012 * [FEATURE] Remote-write: Add Azure AD OAuth authentication support for remote write requests. #12572 * [ENHANCEMENT] Remote-write: Add a header to count retried remote write requests. #12729 * [ENHANCEMENT] TSDB: Improve query performance by re-using iterator when moving between series. #12757 @@ -31,6 +21,7 @@ * [ENHANCEMENT] Scraping: Save memory when scraping by delaying creation of buffer. #12953 * [ENHANCEMENT] Agent: Allow ingestion of out-of-order samples. #12897 * [ENHANCEMENT] Promtool: Improve support for native histograms in TSDB analyze command. #12869 +* [ENHANCEMENT] Scraping: Add configuration option for tracking staleness of scraped timestamps. 
#13060 * [BUGFIX] SD: Ensure that discovery managers are properly canceled. #10569 * [BUGFIX] TSDB: Fix PostingsForMatchers race with creating new series. #12558 * [BUGFIX] TSDB: Fix handling of explicit counter reset header in histograms. #12772 @@ -40,6 +31,8 @@ * [BUGFIX] Promtool: Fix errors not being reported in check rules command. #12715 * [BUGFIX] TSDB: Avoid panics reported in logs when head initialization takes a long time. #12876 * [BUGFIX] TSDB: Ensure that WBL is repaired when possible. #12406 +* [BUGFIX] Storage: Fix crash caused by incorrect mixed samples handling. #13055 +* [BUGFIX] TSDB: Fix compactor failures by adding min time to histogram chunks. #13062 ## 2.47.1 / 2023-10-04 diff --git a/VERSION b/VERSION index 10429c7ad..9a9feb084 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.48.0-rc.2 +2.48.0 diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 408891028..4260f3d26 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.48.0-rc.2", + "version": "0.48.0", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.48.0-rc.2", + "@prometheus-io/lezer-promql": "0.48.0", "lru-cache": "^7.18.3" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 9f4a19d00..d6783b0f8 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.48.0-rc.2", + "version": "0.48.0", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", 
"type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 43cda8b13..8fa44937b 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.48.0-rc.2", + "version": "0.48.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.48.0-rc.2", + "version": "0.48.0", "workspaces": [ "react-app", "module/*" @@ -30,10 +30,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.48.0-rc.2", + "version": "0.48.0", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.48.0-rc.2", + "@prometheus-io/lezer-promql": "0.48.0", "lru-cache": "^7.18.3" }, "devDependencies": { @@ -70,7 +70,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.48.0-rc.2", + "version": "0.48.0", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.2.3", @@ -20764,7 +20764,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.48.0-rc.2", + "version": "0.48.0", "dependencies": { "@codemirror/autocomplete": "^6.7.1", "@codemirror/commands": "^6.2.4", @@ -20782,7 +20782,7 @@ "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.48.0-rc.2", + "@prometheus-io/codemirror-promql": "0.48.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.0", @@ -23422,7 +23422,7 @@ "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.48.0-rc.2", + "@prometheus-io/codemirror-promql": "0.48.0", "@testing-library/react-hooks": "^7.0.2", "@types/enzyme": "^3.10.13", "@types/flot": "0.0.32", @@ -23486,7 +23486,7 @@ "@lezer/common": "^1.0.3", "@lezer/highlight": "^1.1.6", "@lezer/lr": "^1.3.6", - "@prometheus-io/lezer-promql": "0.48.0-rc.2", + "@prometheus-io/lezer-promql": "0.48.0", "isomorphic-fetch": 
"^3.0.0", "lru-cache": "^7.18.3", "nock": "^13.3.1" diff --git a/web/ui/package.json b/web/ui/package.json index 9b4dfbc62..acdef8236 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -28,5 +28,5 @@ "ts-jest": "^29.1.0", "typescript": "^4.9.5" }, - "version": "0.48.0-rc.2" + "version": "0.48.0" } diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index de0d0e315..a19fc09a5 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.48.0-rc.2", + "version": "0.48.0", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.7.1", @@ -19,7 +19,7 @@ "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.48.0-rc.2", + "@prometheus-io/codemirror-promql": "0.48.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.0", From 10ebeb0a629e7c4880347be18ae4e2d1850bdf2b Mon Sep 17 00:00:00 2001 From: Ziqi Zhao Date: Thu, 16 Nov 2023 13:00:11 +0800 Subject: [PATCH 592/632] rename lastSum -> lastCount && enrich test case with expectBucketCount and expectSchema Signed-off-by: Ziqi Zhao --- scrape/target.go | 8 +++---- scrape/target_test.go | 50 ++++++++++++++++++++++++++++--------------- 2 files changed, 37 insertions(+), 21 deletions(-) diff --git a/scrape/target.go b/scrape/target.go index ed4c598b7..72b91a41e 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -367,18 +367,18 @@ type bucketLimitAppender struct { func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { if h != nil { for len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit { - lastSum := len(h.PositiveBuckets) + len(h.NegativeBuckets) + lastCount := len(h.PositiveBuckets) + len(h.NegativeBuckets) h = h.ReduceResolution(h.Schema - 1) - if 
len(h.PositiveBuckets)+len(h.NegativeBuckets) == lastSum { + if len(h.PositiveBuckets)+len(h.NegativeBuckets) == lastCount { return 0, errBucketLimit } } } if fh != nil { for len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit { - lastSum := len(fh.PositiveBuckets) + len(fh.NegativeBuckets) + lastCount := len(fh.PositiveBuckets) + len(fh.NegativeBuckets) fh = fh.ReduceResolution(fh.Schema - 1) - if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) == lastSum { + if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) == lastCount { return 0, errBucketLimit } } diff --git a/scrape/target_test.go b/scrape/target_test.go index f676b85c0..604cfa995 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -508,9 +508,11 @@ func TestBucketLimitAppender(t *testing.T) { } cases := []struct { - h histogram.Histogram - limit int - expectError bool + h histogram.Histogram + limit int + expectError bool + expectBucketCount int + expectSchema int32 }{ { h: example, @@ -518,14 +520,18 @@ func TestBucketLimitAppender(t *testing.T) { expectError: true, }, { - h: example, - limit: 4, - expectError: false, + h: example, + limit: 4, + expectError: false, + expectBucketCount: 4, + expectSchema: -1, }, { - h: example, - limit: 10, - expectError: false, + h: example, + limit: 10, + expectError: false, + expectBucketCount: 6, + expectSchema: 0, }, } @@ -536,18 +542,28 @@ func TestBucketLimitAppender(t *testing.T) { t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) { app := &bucketLimitAppender{Appender: resApp, limit: c.limit} ts := int64(10 * time.Minute / time.Millisecond) - h := c.h lbls := labels.FromStrings("__name__", "sparse_histogram_series") var err error if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat()) + fh := c.h.Copy().ToFloat() + _, err = app.AppendHistogram(0, lbls, ts, nil, fh) + if c.expectError { + require.Error(t, err) + } else { + require.Equal(t, c.expectSchema, fh.Schema) + require.Equal(t, 
c.expectBucketCount, len(fh.NegativeBuckets)+len(fh.PositiveBuckets)) + require.NoError(t, err) + } } else { - _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) - } - if c.expectError { - require.Error(t, err) - } else { - require.NoError(t, err) + h := c.h.Copy() + _, err = app.AppendHistogram(0, lbls, ts, h, nil) + if c.expectError { + require.Error(t, err) + } else { + require.Equal(t, c.expectSchema, h.Schema) + require.Equal(t, c.expectBucketCount, len(h.NegativeBuckets)+len(h.PositiveBuckets)) + require.NoError(t, err) + } } require.NoError(t, app.Commit()) }) From 32ee1b15de6220ab975f3dac7eb82131a0b1e95f Mon Sep 17 00:00:00 2001 From: zenador Date: Thu, 16 Nov 2023 22:07:37 +0800 Subject: [PATCH 593/632] Fix error on ingesting out-of-order exemplars (#13021) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix and improve ingesting exemplars for native histograms. See code comment for a detailed explanation of the algorithm. Note that this changes the current behavior for all kind of samples slightly: We now allow exemplars with the same timestamp as during the last scrape if the value or the labels have changed. Also note that we now do not ingest exemplars without timestamps for native histograms anymore. 
Signed-off-by: Jeanette Tan Signed-off-by: György Krajcsovits Co-authored-by: Björn Rabenstein --------- Signed-off-by: Jeanette Tan Signed-off-by: György Krajcsovits Signed-off-by: zenador Co-authored-by: György Krajcsovits Co-authored-by: Björn Rabenstein --- model/textparse/protobufparse.go | 14 ++++-- model/textparse/protobufparse_test.go | 8 ---- scrape/scrape.go | 62 +++++++++++++++++++-------- scrape/scrape_test.go | 47 +++++++++++++++++--- tsdb/exemplar.go | 19 +++++++- 5 files changed, 113 insertions(+), 37 deletions(-) diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go index 9a6dd6f6d..23afb5c59 100644 --- a/model/textparse/protobufparse.go +++ b/model/textparse/protobufparse.go @@ -317,22 +317,28 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { exProto = m.GetCounter().GetExemplar() case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: bb := m.GetHistogram().GetBucket() + isClassic := p.state == EntrySeries if p.fieldPos < 0 { - if p.state == EntrySeries { + if isClassic { return false // At _count or _sum. } p.fieldPos = 0 // Start at 1st bucket for native histograms. } for p.fieldPos < len(bb) { exProto = bb[p.fieldPos].GetExemplar() - if p.state == EntrySeries { + if isClassic { break } p.fieldPos++ - if exProto != nil { - break + // We deliberately drop exemplars with no timestamp only for native histograms. + if exProto != nil && (isClassic || exProto.GetTimestamp() != nil) { + break // Found a classic histogram exemplar or a native histogram exemplar with a timestamp. } } + // If the last exemplar for native histograms has no timestamp, ignore it. 
+ if !isClassic && exProto.GetTimestamp() == nil { + return false + } default: return false } diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go index 10ec5f440..d83f2088a 100644 --- a/model/textparse/protobufparse_test.go +++ b/model/textparse/protobufparse_test.go @@ -729,7 +729,6 @@ func TestProtobufParse(t *testing.T) { ), e: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, { @@ -766,7 +765,6 @@ func TestProtobufParse(t *testing.T) { ), e: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, { @@ -802,7 +800,6 @@ func TestProtobufParse(t *testing.T) { ), e: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, { @@ -839,7 +836,6 @@ func TestProtobufParse(t *testing.T) { ), e: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, { @@ -1233,7 +1229,6 @@ func TestProtobufParse(t *testing.T) { ), e: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, { // 12 @@ -1328,7 +1323,6 @@ func TestProtobufParse(t *testing.T) { ), e: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, { // 21 @@ -1422,7 +1416,6 @@ func 
TestProtobufParse(t *testing.T) { ), e: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, { // 30 @@ -1517,7 +1510,6 @@ func TestProtobufParse(t *testing.T) { ), e: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, { // 39 diff --git a/scrape/scrape.go b/scrape/scrape.go index 790ee18af..1bcc333d8 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -24,6 +24,7 @@ import ( "math" "net/http" "reflect" + "sort" "strconv" "strings" "sync" @@ -1404,6 +1405,8 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, metadataChanged bool ) + exemplars := make([]exemplar.Exemplar, 1) + // updateMetadata updates the current iteration's metadata object and the // metadataChanged value if we have metadata in the scrape cache AND the // labelset is for a new series or the metadata for this series has just @@ -1569,18 +1572,55 @@ loop: // Increment added even if there's an error so we correctly report the // number of samples remaining after relabeling. added++ - + exemplars = exemplars[:0] // Reset and reuse the exemplar slice. for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) { if !e.HasTs { + if isHistogram { + // We drop exemplars for native histograms if they don't have a timestamp. + // Missing timestamps are deliberately not supported as we want to start + // enforcing timestamps for exemplars as otherwise proper deduplication + // is inefficient and purely based on heuristics: we cannot distinguish + // between repeated exemplars and new instances with the same values. + // This is done silently without logs as it is not an error but out of spec. 
+ // This does not affect classic histograms so that behaviour is unchanged. + e = exemplar.Exemplar{} // Reset for next time round loop. + continue + } e.Ts = t } + exemplars = append(exemplars, e) + e = exemplar.Exemplar{} // Reset for next time round loop. + } + sort.Slice(exemplars, func(i, j int) bool { + // Sort first by timestamp, then value, then labels so the checking + // for duplicates / out of order is more efficient during validation. + if exemplars[i].Ts != exemplars[j].Ts { + return exemplars[i].Ts < exemplars[j].Ts + } + if exemplars[i].Value != exemplars[j].Value { + return exemplars[i].Value < exemplars[j].Value + } + return exemplars[i].Labels.Hash() < exemplars[j].Labels.Hash() + }) + outOfOrderExemplars := 0 + for _, e := range exemplars { _, exemplarErr := app.AppendExemplar(ref, lset, e) - exemplarErr = sl.checkAddExemplarError(exemplarErr, e, &appErrs) - if exemplarErr != nil { + switch { + case exemplarErr == nil: + // Do nothing. + case errors.Is(exemplarErr, storage.ErrOutOfOrderExemplar): + outOfOrderExemplars++ + default: // Since exemplar storage is still experimental, we don't fail the scrape on ingestion errors. level.Debug(sl.l).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr) } - e = exemplar.Exemplar{} // reset for next time round loop + } + if outOfOrderExemplars > 0 && outOfOrderExemplars == len(exemplars) { + // Only report out of order exemplars if all are out of order, otherwise this was a partial update + // to some existing set of exemplars. 
+ appErrs.numExemplarOutOfOrder += outOfOrderExemplars + level.Debug(sl.l).Log("msg", "Out of order exemplars", "count", outOfOrderExemplars, "latest", fmt.Sprintf("%+v", exemplars[len(exemplars)-1])) + sl.metrics.targetScrapeExemplarOutOfOrder.Add(float64(outOfOrderExemplars)) } if sl.appendMetadataToWAL && metadataChanged { @@ -1673,20 +1713,6 @@ func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err e } } -func (sl *scrapeLoop) checkAddExemplarError(err error, e exemplar.Exemplar, appErrs *appendErrors) error { - switch { - case errors.Is(err, storage.ErrNotFound): - return storage.ErrNotFound - case errors.Is(err, storage.ErrOutOfOrderExemplar): - appErrs.numExemplarOutOfOrder++ - level.Debug(sl.l).Log("msg", "Out of order exemplar", "exemplar", fmt.Sprintf("%+v", e)) - sl.metrics.targetScrapeExemplarOutOfOrder.Inc() - return nil - default: - return err - } -} - // The constants are suffixed with the invalid \xff unicode rune to avoid collisions // with scraped metrics in the cache. var ( diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index a2e0d00c6..522d2e1f8 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -2155,7 +2155,7 @@ metric_total{n="2"} 2 # {t="2"} 2.0 20000 }, }, { - title: "Native histogram with two exemplars", + title: "Native histogram with three exemplars", scrapeText: `name: "test_histogram" help: "Test histogram with many buckets removed to keep it manageable in size." type: HISTOGRAM @@ -2193,6 +2193,21 @@ metric: < value: -0.00029 > > + bucket: < + cumulative_count: 32 + upper_bound: -0.0001899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "58215" + > + value: -0.00019 + timestamp: < + seconds: 1625851055 + nanos: 146848599 + > + > + > schema: 3 zero_threshold: 2.938735877055719e-39 zero_count: 2 @@ -2248,12 +2263,13 @@ metric: < }, }}, exemplars: []exemplar.Exemplar{ + // Native histogram exemplars are arranged by timestamp, and those with missing timestamps are dropped. 
+ {Labels: labels.FromStrings("dummyID", "58215"), Value: -0.00019, Ts: 1625851055146, HasTs: true}, {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, Ts: 1234568, HasTs: false}, }, }, { - title: "Native histogram with two exemplars scraped as classic histogram", + title: "Native histogram with three exemplars scraped as classic histogram", scrapeText: `name: "test_histogram" help: "Test histogram with many buckets removed to keep it manageable in size." type: HISTOGRAM @@ -2291,6 +2307,21 @@ metric: < value: -0.00029 > > + bucket: < + cumulative_count: 32 + upper_bound: -0.0001899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "58215" + > + value: -0.00019 + timestamp: < + seconds: 1625851055 + nanos: 146848599 + > + > + > schema: 3 zero_threshold: 2.938735877055719e-39 zero_count: 2 @@ -2332,6 +2363,7 @@ metric: < {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0004899999999999998"), t: 1234568, f: 2}, {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0003899999999999998"), t: 1234568, f: 4}, {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0002899999999999998"), t: 1234568, f: 16}, + {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0001899999999999998"), t: 1234568, f: 32}, {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), t: 1234568, f: 175}, }, histograms: []histogramSample{{ @@ -2355,10 +2387,15 @@ metric: < }, }}, exemplars: []exemplar.Exemplar{ + // Native histogram one is arranged by timestamp. + // Exemplars with missing timestamps are dropped for native histograms. 
+ {Labels: labels.FromStrings("dummyID", "58215"), Value: -0.00019, Ts: 1625851055146, HasTs: true}, + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true}, + // Classic histogram one is in order of appearance. + // Exemplars with missing timestamps are supported for classic histograms. {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true}, {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, Ts: 1234568, HasTs: false}, - {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, Ts: 1234568, HasTs: false}, + {Labels: labels.FromStrings("dummyID", "58215"), Value: -0.00019, Ts: 1625851055146, HasTs: true}, }, }, } diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index 904fc7c2b..8eaf42653 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -245,11 +245,26 @@ func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemp // Check for duplicate vs last stored exemplar for this series. // NB these are expected, and appending them is a no-op. - if ce.exemplars[idx.newest].exemplar.Equals(e) { + // For floats and classic histograms, there is only 1 exemplar per series, + // so this is sufficient. For native histograms with multiple exemplars per series, + // we have another check below. + newestExemplar := ce.exemplars[idx.newest].exemplar + if newestExemplar.Equals(e) { return storage.ErrDuplicateExemplar } - if e.Ts <= ce.exemplars[idx.newest].exemplar.Ts { + // Since during the scrape the exemplars are sorted first by timestamp, then value, then labels, + // if any of these conditions are true, we know that the exemplar is either a duplicate + // of a previous one (but not the most recent one as that is checked above) or out of order. 
+ // We now allow exemplars with duplicate timestamps as long as they have different values and/or labels + // since that can happen for different buckets of a native histogram. + // We do not distinguish between duplicates and out of order as iterating through the exemplars + // to check for that would be expensive (versus just comparing with the most recent one) especially + // since this is run under a lock, and not worth it as we just need to return an error so we do not + // append the exemplar. + if e.Ts < newestExemplar.Ts || + (e.Ts == newestExemplar.Ts && e.Value < newestExemplar.Value) || + (e.Ts == newestExemplar.Ts && e.Value == newestExemplar.Value && e.Labels.Hash() < newestExemplar.Labels.Hash()) { if appended { ce.metrics.outOfOrderExemplars.Inc() } From f997c72f294c0f18ca13fa06d51889af04135195 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Fri, 17 Nov 2023 12:29:36 +0100 Subject: [PATCH 594/632] Make head block ULIDs descriptive (#13100) * Make head block ULIDs descriptive As far as I understand, these ULIDs aren't persisted anywhere, so it should be safe to change them. When debugging an issue, seeing an ULID like `2ZBXFNYVVFDXFPGSB1CHFNYQTZ` or `33DXR7JA39CHDKMQ9C40H6YVVF` isn't very helpful, so I propose to make them readable in their ULID string version. 
Signed-off-by: Oleg Zaytsev * Set a different ULID for RangeHead Signed-off-by: Oleg Zaytsev --------- Signed-off-by: Oleg Zaytsev --- tsdb/head.go | 10 ++++++---- tsdb/ooo_head.go | 8 +++++--- tsdb/ooo_head_read.go | 7 ++++--- 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/tsdb/head.go b/tsdb/head.go index f7e697e54..d3b2b09cc 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -1407,11 +1407,13 @@ func (h *RangeHead) NumSeries() uint64 { return h.head.NumSeries() } +var rangeHeadULID = ulid.MustParse("0000000000XXXXXXXRANGEHEAD") + func (h *RangeHead) Meta() BlockMeta { return BlockMeta{ MinTime: h.MinTime(), MaxTime: h.MaxTime(), - ULID: h.head.Meta().ULID, + ULID: rangeHeadULID, Stats: BlockStats{ NumSeries: h.NumSeries(), }, @@ -1537,15 +1539,15 @@ func (h *Head) NumSeries() uint64 { return h.numSeries.Load() } +var headULID = ulid.MustParse("0000000000XXXXXXXXXXXXHEAD") + // Meta returns meta information about the head. // The head is dynamic so will return dynamic results. 
func (h *Head) Meta() BlockMeta { - var id [16]byte - copy(id[:], "______head______") return BlockMeta{ MinTime: h.MinTime(), MaxTime: h.MaxTime(), - ULID: ulid.ULID(id), + ULID: headULID, Stats: BlockStats{ NumSeries: h.NumSeries(), }, diff --git a/tsdb/ooo_head.go b/tsdb/ooo_head.go index 45827889e..1251af4a9 100644 --- a/tsdb/ooo_head.go +++ b/tsdb/ooo_head.go @@ -17,6 +17,8 @@ import ( "fmt" "sort" + "github.com/oklog/ulid" + "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/tombstones" ) @@ -135,13 +137,13 @@ func (oh *OOORangeHead) Tombstones() (tombstones.Reader, error) { return tombstones.NewMemTombstones(), nil } +var oooRangeHeadULID = ulid.MustParse("0000000000XXXX000RANGEHEAD") + func (oh *OOORangeHead) Meta() BlockMeta { - var id [16]byte - copy(id[:], "____ooo_head____") return BlockMeta{ MinTime: oh.mint, MaxTime: oh.maxt, - ULID: id, + ULID: oooRangeHeadULID, Stats: BlockStats{ NumSeries: oh.head.NumSeries(), }, diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index a7a1e9da2..b9c2dc4a5 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -18,6 +18,7 @@ import ( "errors" "math" + "github.com/oklog/ulid" "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" @@ -371,13 +372,13 @@ func (ch *OOOCompactionHead) Tombstones() (tombstones.Reader, error) { return tombstones.NewMemTombstones(), nil } +var oooCompactionHeadULID = ulid.MustParse("0000000000XX000COMPACTHEAD") + func (ch *OOOCompactionHead) Meta() BlockMeta { - var id [16]byte - copy(id[:], "copy(id[:], \"ooo_compact_head\")") return BlockMeta{ MinTime: ch.mint, MaxTime: ch.maxt, - ULID: id, + ULID: oooCompactionHeadULID, Stats: BlockStats{ NumSeries: uint64(len(ch.postings)), }, From a3e02f35d63c1d92ac607d33da7a9661f53ae1cb Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 17 Nov 2023 18:19:40 +0000 Subject: [PATCH 595/632] labels: extract common code between slice and stringlabels This reduces bulk and should 
avoid issues if a fix is made in one file and not the other. A few methods now call `Range()` instead of `range`, but nothing performance-sensitive. Signed-off-by: Bryan Boreham --- model/labels/labels.go | 214 ------------------------- model/labels/labels_common.go | 235 ++++++++++++++++++++++++++++ model/labels/labels_stringlabels.go | 226 -------------------------- 3 files changed, 235 insertions(+), 440 deletions(-) create mode 100644 model/labels/labels_common.go diff --git a/model/labels/labels.go b/model/labels/labels.go index 231460ea3..bf67224bb 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -17,32 +17,12 @@ package labels import ( "bytes" - "encoding/json" - "strconv" "strings" "github.com/cespare/xxhash/v2" - "github.com/prometheus/common/model" "golang.org/x/exp/slices" ) -// Well-known label names used by Prometheus components. -const ( - MetricName = "__name__" - AlertName = "alertname" - BucketLabel = "le" - InstanceName = "instance" - - labelSep = '\xfe' -) - -var seps = []byte{'\xff'} - -// Label is a key/value pair of strings. -type Label struct { - Name, Value string -} - // Labels is a sorted set of labels. Order has to be guaranteed upon // instantiation. type Labels []Label @@ -51,23 +31,6 @@ func (ls Labels) Len() int { return len(ls) } func (ls Labels) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] } func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name } -func (ls Labels) String() string { - var b bytes.Buffer - - b.WriteByte('{') - for i, l := range ls { - if i > 0 { - b.WriteByte(',') - b.WriteByte(' ') - } - b.WriteString(l.Name) - b.WriteByte('=') - b.WriteString(strconv.Quote(l.Value)) - } - b.WriteByte('}') - return b.String() -} - // Bytes returns ls as a byte slice. // It uses an byte invalid character as a separator and so should not be used for printing. 
func (ls Labels) Bytes(buf []byte) []byte { @@ -84,40 +47,6 @@ func (ls Labels) Bytes(buf []byte) []byte { return b.Bytes() } -// MarshalJSON implements json.Marshaler. -func (ls Labels) MarshalJSON() ([]byte, error) { - return json.Marshal(ls.Map()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (ls *Labels) UnmarshalJSON(b []byte) error { - var m map[string]string - - if err := json.Unmarshal(b, &m); err != nil { - return err - } - - *ls = FromMap(m) - return nil -} - -// MarshalYAML implements yaml.Marshaler. -func (ls Labels) MarshalYAML() (interface{}, error) { - return ls.Map(), nil -} - -// UnmarshalYAML implements yaml.Unmarshaler. -func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error { - var m map[string]string - - if err := unmarshal(&m); err != nil { - return err - } - - *ls = FromMap(m) - return nil -} - // MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean. // If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false. func (ls Labels) MatchLabels(on bool, names ...string) Labels { @@ -318,19 +247,6 @@ func (ls Labels) WithoutEmpty() Labels { return ls } -// IsValid checks if the metric name or label names are valid. -func (ls Labels) IsValid() bool { - for _, l := range ls { - if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) { - return false - } - if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { - return false - } - } - return true -} - // Equal returns whether the two label sets are equal. func Equal(ls, o Labels) bool { if len(ls) != len(o) { @@ -344,15 +260,6 @@ func Equal(ls, o Labels) bool { return true } -// Map returns a string map of the labels. 
-func (ls Labels) Map() map[string]string { - m := make(map[string]string, len(ls)) - for _, l := range ls { - m[l.Name] = l.Value - } - return m -} - // EmptyLabels returns n empty Labels value, for convenience. func EmptyLabels() Labels { return Labels{} @@ -368,15 +275,6 @@ func New(ls ...Label) Labels { return set } -// FromMap returns new sorted Labels from the given map. -func FromMap(m map[string]string) Labels { - l := make([]Label, 0, len(m)) - for k, v := range m { - l = append(l, Label{Name: k, Value: v}) - } - return New(l...) -} - // FromStrings creates new labels from pairs of strings. func FromStrings(ss ...string) Labels { if len(ss)%2 != 0 { @@ -460,118 +358,6 @@ func (ls Labels) ReleaseStrings(release func(string)) { } } -// Builder allows modifying Labels. -type Builder struct { - base Labels - del []string - add []Label -} - -// NewBuilder returns a new LabelsBuilder. -func NewBuilder(base Labels) *Builder { - b := &Builder{ - del: make([]string, 0, 5), - add: make([]Label, 0, 5), - } - b.Reset(base) - return b -} - -// Reset clears all current state for the builder. -func (b *Builder) Reset(base Labels) { - b.base = base - b.del = b.del[:0] - b.add = b.add[:0] - for _, l := range b.base { - if l.Value == "" { - b.del = append(b.del, l.Name) - } - } -} - -// Del deletes the label of the given name. -func (b *Builder) Del(ns ...string) *Builder { - for _, n := range ns { - for i, a := range b.add { - if a.Name == n { - b.add = append(b.add[:i], b.add[i+1:]...) - } - } - b.del = append(b.del, n) - } - return b -} - -// Keep removes all labels from the base except those with the given names. -func (b *Builder) Keep(ns ...string) *Builder { -Outer: - for _, l := range b.base { - for _, n := range ns { - if l.Name == n { - continue Outer - } - } - b.del = append(b.del, l.Name) - } - return b -} - -// Set the name/value pair as a label. A value of "" means delete that label. 
-func (b *Builder) Set(n, v string) *Builder { - if v == "" { - // Empty labels are the same as missing labels. - return b.Del(n) - } - for i, a := range b.add { - if a.Name == n { - b.add[i].Value = v - return b - } - } - b.add = append(b.add, Label{Name: n, Value: v}) - - return b -} - -func (b *Builder) Get(n string) string { - // Del() removes entries from .add but Set() does not remove from .del, so check .add first. - for _, a := range b.add { - if a.Name == n { - return a.Value - } - } - if slices.Contains(b.del, n) { - return "" - } - return b.base.Get(n) -} - -// Range calls f on each label in the Builder. -func (b *Builder) Range(f func(l Label)) { - // Stack-based arrays to avoid heap allocation in most cases. - var addStack [128]Label - var delStack [128]string - // Take a copy of add and del, so they are unaffected by calls to Set() or Del(). - origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...) - b.base.Range(func(l Label) { - if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) { - f(l) - } - }) - for _, a := range origAdd { - f(a) - } -} - -func contains(s []Label, n string) bool { - for _, a := range s { - if a.Name == n { - return true - } - } - return false -} - // Labels returns the labels from the builder. // If no modifications were made, the original labels are returned. func (b *Builder) Labels() Labels { diff --git a/model/labels/labels_common.go b/model/labels/labels_common.go new file mode 100644 index 000000000..2a722b84c --- /dev/null +++ b/model/labels/labels_common.go @@ -0,0 +1,235 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package labels + +import ( + "bytes" + "encoding/json" + "strconv" + + "github.com/prometheus/common/model" + "golang.org/x/exp/slices" +) + +const ( + MetricName = "__name__" + AlertName = "alertname" + BucketLabel = "le" + InstanceName = "instance" + + labelSep = '\xfe' +) + +var seps = []byte{'\xff'} + +// Label is a key/value pair of strings. +type Label struct { + Name, Value string +} + +func (ls Labels) String() string { + var b bytes.Buffer + + b.WriteByte('{') + i := 0 + ls.Range(func(l Label) { + if i > 0 { + b.WriteByte(',') + b.WriteByte(' ') + } + b.WriteString(l.Name) + b.WriteByte('=') + b.WriteString(strconv.Quote(l.Value)) + i++ + }) + b.WriteByte('}') + return b.String() +} + +// MarshalJSON implements json.Marshaler. +func (ls Labels) MarshalJSON() ([]byte, error) { + return json.Marshal(ls.Map()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (ls *Labels) UnmarshalJSON(b []byte) error { + var m map[string]string + + if err := json.Unmarshal(b, &m); err != nil { + return err + } + + *ls = FromMap(m) + return nil +} + +// MarshalYAML implements yaml.Marshaler. +func (ls Labels) MarshalYAML() (interface{}, error) { + return ls.Map(), nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error { + var m map[string]string + + if err := unmarshal(&m); err != nil { + return err + } + + *ls = FromMap(m) + return nil +} + +// IsValid checks if the metric name or label names are valid. 
+func (ls Labels) IsValid() bool { + err := ls.Validate(func(l Label) error { + if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) { + return strconv.ErrSyntax + } + if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { + return strconv.ErrSyntax + } + return nil + }) + return err == nil +} + +// Map returns a string map of the labels. +func (ls Labels) Map() map[string]string { + m := make(map[string]string) + ls.Range(func(l Label) { + m[l.Name] = l.Value + }) + return m +} + +// FromMap returns new sorted Labels from the given map. +func FromMap(m map[string]string) Labels { + l := make([]Label, 0, len(m)) + for k, v := range m { + l = append(l, Label{Name: k, Value: v}) + } + return New(l...) +} + +// Builder allows modifying Labels. +type Builder struct { + base Labels + del []string + add []Label +} + +// NewBuilder returns a new LabelsBuilder. +func NewBuilder(base Labels) *Builder { + b := &Builder{ + del: make([]string, 0, 5), + add: make([]Label, 0, 5), + } + b.Reset(base) + return b +} + +// Reset clears all current state for the builder. +func (b *Builder) Reset(base Labels) { + b.base = base + b.del = b.del[:0] + b.add = b.add[:0] + b.base.Range(func(l Label) { + if l.Value == "" { + b.del = append(b.del, l.Name) + } + }) +} + +// Del deletes the label of the given name. +func (b *Builder) Del(ns ...string) *Builder { + for _, n := range ns { + for i, a := range b.add { + if a.Name == n { + b.add = append(b.add[:i], b.add[i+1:]...) + } + } + b.del = append(b.del, n) + } + return b +} + +// Keep removes all labels from the base except those with the given names. +func (b *Builder) Keep(ns ...string) *Builder { + b.base.Range(func(l Label) { + for _, n := range ns { + if l.Name == n { + return + } + } + b.del = append(b.del, l.Name) + }) + return b +} + +// Set the name/value pair as a label. A value of "" means delete that label. 
+func (b *Builder) Set(n, v string) *Builder { + if v == "" { + // Empty labels are the same as missing labels. + return b.Del(n) + } + for i, a := range b.add { + if a.Name == n { + b.add[i].Value = v + return b + } + } + b.add = append(b.add, Label{Name: n, Value: v}) + + return b +} + +func (b *Builder) Get(n string) string { + // Del() removes entries from .add but Set() does not remove from .del, so check .add first. + for _, a := range b.add { + if a.Name == n { + return a.Value + } + } + if slices.Contains(b.del, n) { + return "" + } + return b.base.Get(n) +} + +// Range calls f on each label in the Builder. +func (b *Builder) Range(f func(l Label)) { + // Stack-based arrays to avoid heap allocation in most cases. + var addStack [128]Label + var delStack [128]string + // Take a copy of add and del, so they are unaffected by calls to Set() or Del(). + origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...) + b.base.Range(func(l Label) { + if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) { + f(l) + } + }) + for _, a := range origAdd { + f(a) + } +} + +func contains(s []Label, n string) bool { + for _, a := range s { + if a.Name == n { + return true + } + } + return false +} diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go index bbb4452d4..d79a83679 100644 --- a/model/labels/labels_stringlabels.go +++ b/model/labels/labels_stringlabels.go @@ -16,33 +16,14 @@ package labels import ( - "bytes" - "encoding/json" "reflect" - "strconv" "strings" "unsafe" "github.com/cespare/xxhash/v2" - "github.com/prometheus/common/model" "golang.org/x/exp/slices" ) -// Well-known label names used by Prometheus components. -const ( - MetricName = "__name__" - AlertName = "alertname" - BucketLabel = "le" - InstanceName = "instance" -) - -var seps = []byte{'\xff'} - -// Label is a key/value pair of strings. 
-type Label struct { - Name, Value string -} - // Labels is implemented by a single flat string holding name/value pairs. // Each name and value is preceded by its length in varint encoding. // Names are in order. @@ -77,26 +58,6 @@ func decodeString(data string, index int) (string, int) { return data[index : index+size], index + size } -func (ls Labels) String() string { - var b bytes.Buffer - - b.WriteByte('{') - for i := 0; i < len(ls.data); { - if i > 0 { - b.WriteByte(',') - b.WriteByte(' ') - } - var name, value string - name, i = decodeString(ls.data, i) - value, i = decodeString(ls.data, i) - b.WriteString(name) - b.WriteByte('=') - b.WriteString(strconv.Quote(value)) - } - b.WriteByte('}') - return b.String() -} - // Bytes returns ls as a byte slice. // It uses non-printing characters and so should not be used for printing. func (ls Labels) Bytes(buf []byte) []byte { @@ -109,45 +70,11 @@ func (ls Labels) Bytes(buf []byte) []byte { return buf } -// MarshalJSON implements json.Marshaler. -func (ls Labels) MarshalJSON() ([]byte, error) { - return json.Marshal(ls.Map()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (ls *Labels) UnmarshalJSON(b []byte) error { - var m map[string]string - - if err := json.Unmarshal(b, &m); err != nil { - return err - } - - *ls = FromMap(m) - return nil -} - -// MarshalYAML implements yaml.Marshaler. -func (ls Labels) MarshalYAML() (interface{}, error) { - return ls.Map(), nil -} - // IsZero implements yaml.IsZeroer - if we don't have this then 'omitempty' fields are always omitted. func (ls Labels) IsZero() bool { return len(ls.data) == 0 } -// UnmarshalYAML implements yaml.Unmarshaler. -func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error { - var m map[string]string - - if err := unmarshal(&m); err != nil { - return err - } - - *ls = FromMap(m) - return nil -} - // MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean. 
// If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false. // TODO: This is only used in printing an error message @@ -364,37 +291,11 @@ func (ls Labels) WithoutEmpty() Labels { return ls } -// IsValid checks if the metric name or label names are valid. -func (ls Labels) IsValid() bool { - err := ls.Validate(func(l Label) error { - if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) { - return strconv.ErrSyntax - } - if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { - return strconv.ErrSyntax - } - return nil - }) - return err == nil -} - // Equal returns whether the two label sets are equal. func Equal(ls, o Labels) bool { return ls.data == o.data } -// Map returns a string map of the labels. -func (ls Labels) Map() map[string]string { - m := make(map[string]string, len(ls.data)/10) - for i := 0; i < len(ls.data); { - var lName, lValue string - lName, i = decodeString(ls.data, i) - lValue, i = decodeString(ls.data, i) - m[lName] = lValue - } - return m -} - // EmptyLabels returns an empty Labels value, for convenience. func EmptyLabels() Labels { return Labels{} @@ -420,15 +321,6 @@ func New(ls ...Label) Labels { return Labels{data: yoloString(buf)} } -// FromMap returns new sorted Labels from the given map. -func FromMap(m map[string]string) Labels { - l := make([]Label, 0, len(m)) - for k, v := range m { - l = append(l, Label{Name: k, Value: v}) - } - return New(l...) -} - // FromStrings creates new labels from pairs of strings. func FromStrings(ss ...string) Labels { if len(ss)%2 != 0 { @@ -547,124 +439,6 @@ func (ls Labels) ReleaseStrings(release func(string)) { release(ls.data) } -// Builder allows modifying Labels. -type Builder struct { - base Labels - del []string - add []Label -} - -// NewBuilder returns a new LabelsBuilder. 
-func NewBuilder(base Labels) *Builder { - b := &Builder{ - del: make([]string, 0, 5), - add: make([]Label, 0, 5), - } - b.Reset(base) - return b -} - -// Reset clears all current state for the builder. -func (b *Builder) Reset(base Labels) { - b.base = base - b.del = b.del[:0] - b.add = b.add[:0] - for i := 0; i < len(base.data); { - var lName, lValue string - lName, i = decodeString(base.data, i) - lValue, i = decodeString(base.data, i) - if lValue == "" { - b.del = append(b.del, lName) - } - } -} - -// Del deletes the label of the given name. -func (b *Builder) Del(ns ...string) *Builder { - for _, n := range ns { - for i, a := range b.add { - if a.Name == n { - b.add = append(b.add[:i], b.add[i+1:]...) - } - } - b.del = append(b.del, n) - } - return b -} - -// Keep removes all labels from the base except those with the given names. -func (b *Builder) Keep(ns ...string) *Builder { -Outer: - for i := 0; i < len(b.base.data); { - var lName string - lName, i = decodeString(b.base.data, i) - _, i = decodeString(b.base.data, i) - for _, n := range ns { - if lName == n { - continue Outer - } - } - b.del = append(b.del, lName) - } - return b -} - -// Set the name/value pair as a label. A value of "" means delete that label. -func (b *Builder) Set(n, v string) *Builder { - if v == "" { - // Empty labels are the same as missing labels. - return b.Del(n) - } - for i, a := range b.add { - if a.Name == n { - b.add[i].Value = v - return b - } - } - b.add = append(b.add, Label{Name: n, Value: v}) - - return b -} - -func (b *Builder) Get(n string) string { - // Del() removes entries from .add but Set() does not remove from .del, so check .add first. - for _, a := range b.add { - if a.Name == n { - return a.Value - } - } - if slices.Contains(b.del, n) { - return "" - } - return b.base.Get(n) -} - -// Range calls f on each label in the Builder. -func (b *Builder) Range(f func(l Label)) { - // Stack-based arrays to avoid heap allocation in most cases. 
- var addStack [128]Label - var delStack [128]string - // Take a copy of add and del, so they are unaffected by calls to Set() or Del(). - origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...) - b.base.Range(func(l Label) { - if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) { - f(l) - } - }) - for _, a := range origAdd { - f(a) - } -} - -func contains(s []Label, n string) bool { - for _, a := range s { - if a.Name == n { - return true - } - } - return false -} - // Labels returns the labels from the builder. // If no modifications were made, the original labels are returned. func (b *Builder) Labels() Labels { From 7e2c6fc8f3e0e42927325bdac623ea848575a66e Mon Sep 17 00:00:00 2001 From: wangqing Date: Mon, 20 Nov 2023 17:57:43 +0800 Subject: [PATCH 596/632] fix: The automatically generated file is inconsistent with the file in the code warehouse reference: https://github.com/prometheus/prometheus/commit/3ef153b00cdd8842b49b5abe721418fa5e5de9e0 Signed-off-by: wangqing --- plugins.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins.yml b/plugins.yml index c10dabddb..c7b9d297d 100644 --- a/plugins.yml +++ b/plugins.yml @@ -18,5 +18,6 @@ - github.com/prometheus/prometheus/discovery/scaleway - github.com/prometheus/prometheus/discovery/triton - github.com/prometheus/prometheus/discovery/uyuni +- github.com/prometheus/prometheus/discovery/vultr - github.com/prometheus/prometheus/discovery/xds - github.com/prometheus/prometheus/discovery/zookeeper From 870627fbedb4dbe2a86eeb8c8795b9b2a0abb7f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Mierzwa?= Date: Mon, 20 Nov 2023 12:02:53 +0000 Subject: [PATCH 597/632] Add enable_compression scrape config option MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently Prometheus will always request gzip compression from the target when sending scrape requests. 
HTTP compression does reduce the amount of bytes sent over the wire and so is often desirable. The downside of compression is that it requires extra resources - cpu & memory. This also affects the resource usage on the target since it has to compress the response before sending it to Prometheus. This change adds a new option to the scrape job configuration block: enable_compression. The default is true so it remains the same as current Prometheus behaviour. Setting this option to false allows users to disable compression between Prometheus and the scraped target, which will require more bandwidth but it lowers the resource usage of both Prometheus and the target. Fixes #12319. Signed-off-by: Łukasz Mierzwa --- config/config.go | 3 + config/config_test.go | 55 ++++++++++- ...scrape_config_disable_compression.good.yml | 5 + docs/configuration/configuration.md | 4 + scrape/scrape.go | 47 ++++++--- scrape/scrape_test.go | 96 ++++++++++++++++++- 6 files changed, 192 insertions(+), 18 deletions(-) create mode 100644 config/testdata/scrape_config_disable_compression.good.yml diff --git a/config/config.go b/config/config.go index 4c73f6c49..b832ac9a1 100644 --- a/config/config.go +++ b/config/config.go @@ -158,6 +158,7 @@ var ( HonorLabels: false, HonorTimestamps: true, HTTPClientConfig: config.DefaultHTTPClientConfig, + EnableCompression: true, } // DefaultAlertmanagerConfig is the default alertmanager configuration. @@ -582,6 +583,8 @@ type ScrapeConfig struct { MetricsPath string `yaml:"metrics_path,omitempty"` // The URL scheme with which to fetch metrics from targets. Scheme string `yaml:"scheme,omitempty"` + // Indicator whether to request compressed response from the target. + EnableCompression bool `yaml:"enable_compression"` // An uncompressed response body larger than this many bytes will cause the // scrape to fail. 0 means no limit. 
BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"` diff --git a/config/config_test.go b/config/config_test.go index 12c9891b0..408622cd5 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -186,6 +186,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -288,6 +289,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(50 * time.Second), ScrapeTimeout: model.Duration(5 * time.Second), + EnableCompression: true, BodySizeLimit: 10 * units.MiB, SampleLimit: 1000, TargetLimit: 35, @@ -384,6 +386,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -438,6 +441,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -470,6 +474,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -508,6 +513,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -546,6 +552,7 @@ var expectedConf = &Config{ 
HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -573,6 +580,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -609,6 +617,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -642,6 +651,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -682,6 +692,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -712,6 +723,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -745,6 +757,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ 
-771,6 +784,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -800,6 +814,7 @@ var expectedConf = &Config{ HonorTimestamps: false, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -829,6 +844,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -858,6 +874,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -884,6 +901,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -918,6 +936,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -951,6 +970,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: 
globSampleLimit, TargetLimit: globTargetLimit, @@ -980,6 +1000,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -1009,6 +1030,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -1042,6 +1064,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -1078,6 +1101,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -1133,6 +1157,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -1159,6 +1184,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -1196,6 +1222,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + 
EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -1239,6 +1266,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -1273,6 +1301,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -1301,6 +1330,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -1332,6 +1362,7 @@ var expectedConf = &Config{ HonorTimestamps: true, ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, BodySizeLimit: globBodySizeLimit, SampleLimit: globSampleLimit, TargetLimit: globTargetLimit, @@ -2060,9 +2091,10 @@ func TestGetScrapeConfigs(t *testing.T) { ScrapeTimeout: scrapeTimeout, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, - MetricsPath: "/metrics", - Scheme: "http", - HTTPClientConfig: config.DefaultHTTPClientConfig, + MetricsPath: "/metrics", + Scheme: "http", + EnableCompression: true, + HTTPClientConfig: config.DefaultHTTPClientConfig, ServiceDiscoveryConfigs: discovery.Configs{ discovery.StaticConfig{ { @@ -2118,6 +2150,8 @@ func TestGetScrapeConfigs(t *testing.T) { MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, + EnableCompression: true, + HTTPClientConfig: config.HTTPClientConfig{ TLSConfig: config.TLSConfig{ CertFile: 
filepath.FromSlash("testdata/scrape_configs/valid_cert_file"), @@ -2158,6 +2192,8 @@ func TestGetScrapeConfigs(t *testing.T) { MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, + EnableCompression: true, + ServiceDiscoveryConfigs: discovery.Configs{ &vultr.SDConfig{ HTTPClientConfig: config.HTTPClientConfig{ @@ -2210,3 +2246,16 @@ func kubernetesSDHostURL() config.URL { tURL, _ := url.Parse("https://localhost:1234") return config.URL{URL: tURL} } + +func TestScrapeConfigDisableCompression(t *testing.T) { + want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, false, log.NewNopLogger()) + require.NoError(t, err) + + out, err := yaml.Marshal(want) + + require.NoError(t, err) + got := &Config{} + require.NoError(t, yaml.UnmarshalStrict(out, got)) + + require.Equal(t, false, got.ScrapeConfigs[0].EnableCompression) +} diff --git a/config/testdata/scrape_config_disable_compression.good.yml b/config/testdata/scrape_config_disable_compression.good.yml new file mode 100644 index 000000000..c6320f7db --- /dev/null +++ b/config/testdata/scrape_config_disable_compression.good.yml @@ -0,0 +1,5 @@ +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ['localhost:8080'] + enable_compression: false diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index dc4ea12e7..e9ff2b8f2 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -237,6 +237,10 @@ job_name: params: [ : [, ...] ] +# If enable_compression is set to "false", Prometheus will request uncompressed +# response from the scraped target. +[ enable_compression: | default = true ] + # Sets the `Authorization` header on every scrape request with the # configured username and password. # password and password_file are mutually exclusive. 
diff --git a/scrape/scrape.go b/scrape/scrape.go index 1bcc333d8..983bee837 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -109,6 +109,7 @@ type scrapeLoopOptions struct { scrapeClassicHistograms bool mrc []*relabel.Config cache *scrapeCache + enableCompression bool } const maxAheadTime = 10 * time.Minute @@ -163,6 +164,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed offsetSeed, opts.honorTimestamps, opts.trackTimestampsStaleness, + opts.enableCompression, opts.sampleLimit, opts.bucketLimit, opts.labelLimits, @@ -275,6 +277,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { } honorLabels = sp.config.HonorLabels honorTimestamps = sp.config.HonorTimestamps + enableCompression = sp.config.EnableCompression trackTimestampsStaleness = sp.config.TrackTimestampsStaleness mrc = sp.config.MetricRelabelConfigs ) @@ -295,11 +298,12 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { interval, timeout, err := t.intervalAndTimeout(interval, timeout) var ( s = &targetScraper{ - Target: t, - client: sp.client, - timeout: timeout, - bodySizeLimit: bodySizeLimit, - acceptHeader: acceptHeader(cfg.ScrapeProtocols), + Target: t, + client: sp.client, + timeout: timeout, + bodySizeLimit: bodySizeLimit, + acceptHeader: acceptHeader(cfg.ScrapeProtocols), + acceptEncodingHeader: acceptEncodingHeader(enableCompression), } newLoop = sp.newLoop(scrapeLoopOptions{ target: t, @@ -309,6 +313,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { labelLimits: labelLimits, honorLabels: honorLabels, honorTimestamps: honorTimestamps, + enableCompression: enableCompression, trackTimestampsStaleness: trackTimestampsStaleness, mrc: mrc, cache: cache, @@ -403,6 +408,7 @@ func (sp *scrapePool) sync(targets []*Target) { } honorLabels = sp.config.HonorLabels honorTimestamps = sp.config.HonorTimestamps + enableCompression = sp.config.EnableCompression trackTimestampsStaleness = sp.config.TrackTimestampsStaleness mrc = 
sp.config.MetricRelabelConfigs scrapeClassicHistograms = sp.config.ScrapeClassicHistograms @@ -419,12 +425,13 @@ func (sp *scrapePool) sync(targets []*Target) { var err error interval, timeout, err = t.intervalAndTimeout(interval, timeout) s := &targetScraper{ - Target: t, - client: sp.client, - timeout: timeout, - bodySizeLimit: bodySizeLimit, - acceptHeader: acceptHeader(sp.config.ScrapeProtocols), - metrics: sp.metrics, + Target: t, + client: sp.client, + timeout: timeout, + bodySizeLimit: bodySizeLimit, + acceptHeader: acceptHeader(sp.config.ScrapeProtocols), + acceptEncodingHeader: acceptEncodingHeader(enableCompression), + metrics: sp.metrics, } l := sp.newLoop(scrapeLoopOptions{ target: t, @@ -434,6 +441,7 @@ func (sp *scrapePool) sync(targets []*Target) { labelLimits: labelLimits, honorLabels: honorLabels, honorTimestamps: honorTimestamps, + enableCompression: enableCompression, trackTimestampsStaleness: trackTimestampsStaleness, mrc: mrc, interval: interval, @@ -647,8 +655,9 @@ type targetScraper struct { gzipr *gzip.Reader buf *bufio.Reader - bodySizeLimit int64 - acceptHeader string + bodySizeLimit int64 + acceptHeader string + acceptEncodingHeader string metrics *scrapeMetrics } @@ -670,6 +679,13 @@ func acceptHeader(sps []config.ScrapeProtocol) string { return strings.Join(vals, ",") } +func acceptEncodingHeader(enableCompression bool) string { + if enableCompression { + return "gzip" + } + return "identity" +} + var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version) func (s *targetScraper) scrape(ctx context.Context) (*http.Response, error) { @@ -679,7 +695,7 @@ func (s *targetScraper) scrape(ctx context.Context) (*http.Response, error) { return nil, err } req.Header.Add("Accept", s.acceptHeader) - req.Header.Add("Accept-Encoding", "gzip") + req.Header.Add("Accept-Encoding", s.acceptEncodingHeader) req.Header.Set("User-Agent", UserAgent) req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", strconv.FormatFloat(s.timeout.Seconds(), 'f', -1, 64)) 
@@ -765,6 +781,7 @@ type scrapeLoop struct { offsetSeed uint64 honorTimestamps bool trackTimestampsStaleness bool + enableCompression bool forcedErr error forcedErrMtx sync.Mutex sampleLimit int @@ -1055,6 +1072,7 @@ func newScrapeLoop(ctx context.Context, offsetSeed uint64, honorTimestamps bool, trackTimestampsStaleness bool, + enableCompression bool, sampleLimit int, bucketLimit int, labelLimits *labelLimits, @@ -1102,6 +1120,7 @@ func newScrapeLoop(ctx context.Context, appenderCtx: appenderCtx, honorTimestamps: honorTimestamps, trackTimestampsStaleness: trackTimestampsStaleness, + enableCompression: enableCompression, sampleLimit: sampleLimit, bucketLimit: bucketLimit, labelLimits: labelLimits, diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 522d2e1f8..238e90c20 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -651,6 +651,7 @@ func TestScrapeLoopStopBeforeRun(t *testing.T) { nil, nil, 0, true, false, + true, 0, 0, nil, 1, @@ -726,6 +727,7 @@ func TestScrapeLoopStop(t *testing.T) { 0, true, false, + true, 0, 0, nil, 10*time.Millisecond, @@ -805,6 +807,7 @@ func TestScrapeLoopRun(t *testing.T) { 0, true, false, + true, 0, 0, nil, time.Second, @@ -863,6 +866,7 @@ func TestScrapeLoopRun(t *testing.T) { 0, true, false, + true, 0, 0, nil, time.Second, @@ -925,6 +929,7 @@ func TestScrapeLoopForcedErr(t *testing.T) { 0, true, false, + true, 0, 0, nil, time.Second, @@ -986,6 +991,7 @@ func TestScrapeLoopMetadata(t *testing.T) { 0, true, false, + true, 0, 0, nil, 0, @@ -1046,6 +1052,7 @@ func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) { 0, true, false, + true, 0, 0, nil, 0, @@ -1109,6 +1116,7 @@ func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) { 0, true, false, + true, 0, 0, nil, 0, @@ -1190,6 +1198,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) { 0, true, false, + true, 0, 0, nil, 10*time.Millisecond, @@ -1256,6 +1265,7 @@ func 
TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) { 0, true, false, + true, 0, 0, nil, 10*time.Millisecond, @@ -1325,6 +1335,7 @@ func TestScrapeLoopCache(t *testing.T) { 0, true, false, + true, 0, 0, nil, 10*time.Millisecond, @@ -1411,6 +1422,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) { 0, true, false, + true, 0, 0, nil, 10*time.Millisecond, @@ -1528,6 +1540,7 @@ func TestScrapeLoopAppend(t *testing.T) { 0, true, false, + true, 0, 0, nil, 0, @@ -1626,7 +1639,7 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) { }, nil, func(ctx context.Context) storage.Appender { return app }, - nil, 0, true, false, 0, 0, nil, 0, 0, false, false, false, nil, false, newTestScrapeMetrics(t), + nil, 0, true, false, true, 0, 0, nil, 0, 0, false, false, false, nil, false, newTestScrapeMetrics(t), ) slApp := sl.appender(context.Background()) _, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC)) @@ -1658,6 +1671,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) { 0, true, false, + true, 0, 0, nil, 0, @@ -1719,6 +1733,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) { 0, true, false, + true, app.limit, 0, nil, 0, @@ -1799,6 +1814,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) { 0, true, false, + true, app.limit, 0, nil, 0, @@ -1900,6 +1916,7 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) { 0, true, false, + true, 0, 0, nil, 0, @@ -1951,6 +1968,7 @@ func TestScrapeLoopAppendStaleness(t *testing.T) { 0, true, false, + true, 0, 0, nil, 0, @@ -2005,6 +2023,7 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) { 0, true, false, + true, 0, 0, nil, 0, @@ -2049,6 +2068,7 @@ func TestScrapeLoopAppendStalenessIfTrackTimestampStaleness(t *testing.T) { 0, true, true, + true, 0, 0, nil, 0, @@ -2421,6 +2441,7 @@ metric: < 0, true, false, + true, 0, 0, nil, 0, @@ -2511,6 +2532,7 @@ func TestScrapeLoopAppendExemplarSeries(t 
*testing.T) { 0, true, false, + true, 0, 0, nil, 0, @@ -2566,6 +2588,7 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) { 0, true, false, + true, 0, 0, nil, 10*time.Millisecond, @@ -2605,6 +2628,7 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) { 0, true, false, + true, 0, 0, nil, 10*time.Millisecond, @@ -2657,6 +2681,7 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T 0, true, false, + true, 0, 0, nil, 0, @@ -2705,6 +2730,7 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) { 0, true, false, + true, 0, 0, nil, 0, @@ -2997,6 +3023,7 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) { nil, 0, true, false, + true, 0, 0, nil, 0, @@ -3041,6 +3068,7 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) { nil, 0, false, false, + true, 0, 0, nil, 0, @@ -3084,6 +3112,7 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) { 0, true, false, + true, 0, 0, nil, 0, @@ -3145,6 +3174,7 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) { 0, true, false, + true, 0, 0, nil, 0, @@ -3411,6 +3441,7 @@ func TestScrapeAddFast(t *testing.T) { 0, true, false, + true, 0, 0, nil, 0, @@ -3500,6 +3531,7 @@ func TestScrapeReportSingleAppender(t *testing.T) { 0, true, false, + true, 0, 0, nil, 10*time.Millisecond, @@ -3705,6 +3737,7 @@ func TestScrapeLoopLabelLimit(t *testing.T) { 0, true, false, + true, 0, 0, &test.labelLimits, 0, @@ -3911,6 +3944,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t * 0, true, true, + true, 0, 0, nil, 10*time.Millisecond, @@ -3956,3 +3990,63 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t * require.True(t, value.IsStaleNaN(appender.resultFloats[6].f), "Appended second sample not as expected. 
Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f)) } + +func TestScrapeLoopCompression(t *testing.T) { + simpleStorage := teststorage.New(t) + defer simpleStorage.Close() + + metricsText := makeTestMetrics(10) + + for _, tc := range []struct { + enableCompression bool + acceptEncoding string + }{ + { + enableCompression: true, + acceptEncoding: "gzip", + }, + { + enableCompression: false, + acceptEncoding: "identity", + }, + } { + t.Run(fmt.Sprintf("compression=%v,acceptEncoding=%s", tc.enableCompression, tc.acceptEncoding), func(t *testing.T) { + scraped := make(chan bool) + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, tc.acceptEncoding, r.Header.Get("Accept-Encoding"), "invalid value of the Accept-Encoding header") + fmt.Fprint(w, metricsText) + close(scraped) + })) + defer ts.Close() + + config := &config.ScrapeConfig{ + JobName: "test", + SampleLimit: 100, + Scheme: "http", + ScrapeInterval: model.Duration(100 * time.Millisecond), + ScrapeTimeout: model.Duration(100 * time.Millisecond), + EnableCompression: tc.enableCompression, + } + + sp, err := newScrapePool(config, simpleStorage, 0, nil, &Options{}, newTestScrapeMetrics(t)) + require.NoError(t, err) + defer sp.stop() + + testURL, err := url.Parse(ts.URL) + require.NoError(t, err) + sp.Sync([]*targetgroup.Group{ + { + Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(testURL.Host)}}, + }, + }) + require.Equal(t, 1, len(sp.ActiveTargets())) + + select { + case <-time.After(5 * time.Second): + t.Fatalf("target was not scraped") + case <-scraped: + } + }) + } +} From 8fe9250f7d236bcb5a2b21e7357abad2e5dcb31a Mon Sep 17 00:00:00 2001 From: Ziqi Zhao Date: Tue, 21 Nov 2023 16:56:56 +0800 Subject: [PATCH 598/632] optimize the logic of break the loop of reducing resolution Signed-off-by: Ziqi Zhao --- scrape/scrape_test.go | 23 +++++++++++++++++++++++ scrape/target.go | 10 ++++------ 2 files changed, 27 
insertions(+), 6 deletions(-) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index a2e0d00c6..93e163101 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -1866,6 +1866,29 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) { msg, err = MetricFamilyToProtobuf(histogramMetricFamily) require.NoError(t, err) + now = time.Now() + total, added, seriesAdded, err = sl.append(app, msg, "application/vnd.google.protobuf", now) + require.NoError(t, err) + require.Equal(t, 3, total) + require.Equal(t, 3, added) + require.Equal(t, 3, seriesAdded) + + err = sl.metrics.targetScrapeNativeHistogramBucketLimit.Write(&metric) + require.NoError(t, err) + metricValue = metric.GetCounter().GetValue() + require.Equal(t, beforeMetricValue, metricValue) + beforeMetricValue = metricValue + + nativeHistogram.WithLabelValues("L").Observe(100000.0) // in different bucket since > 10*1.1 + + gathered, err = registry.Gather() + require.NoError(t, err) + require.NotEmpty(t, gathered) + + histogramMetricFamily = gathered[0] + msg, err = MetricFamilyToProtobuf(histogramMetricFamily) + require.NoError(t, err) + now = time.Now() total, added, seriesAdded, err = sl.append(app, msg, "application/vnd.google.protobuf", now) if !errors.Is(err, errBucketLimit) { diff --git a/scrape/target.go b/scrape/target.go index 72b91a41e..62f662693 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -367,20 +367,18 @@ type bucketLimitAppender struct { func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { if h != nil { for len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit { - lastCount := len(h.PositiveBuckets) + len(h.NegativeBuckets) - h = h.ReduceResolution(h.Schema - 1) - if len(h.PositiveBuckets)+len(h.NegativeBuckets) == lastCount { + if h.Schema == -4 { return 0, errBucketLimit } + h = h.ReduceResolution(h.Schema - 1) } } if fh != nil { for 
len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit { - lastCount := len(fh.PositiveBuckets) + len(fh.NegativeBuckets) - fh = fh.ReduceResolution(fh.Schema - 1) - if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) == lastCount { + if fh.Schema == -4 { return 0, errBucketLimit } + fh = fh.ReduceResolution(fh.Schema - 1) } } ref, err := app.Appender.AppendHistogram(ref, lset, t, h, fh) From b37258c99b901490ccf22f85f472ba2ba8c8be33 Mon Sep 17 00:00:00 2001 From: Etourneau Gwenn Date: Tue, 21 Nov 2023 20:59:17 +0900 Subject: [PATCH 599/632] Added Caching of network interface for Azure (#12622) * Added Caching of network interface for Azure Signed-off-by: Etourneau Gwenn * Rename Counter for Azure cache Signed-off-by: Etourneau Gwenn * Format with goimports Signed-off-by: Etourneau Gwenn * Updated duration comparaison Enabled cache by default with 5x the default refresh time Signed-off-by: Etourneau Gwenn * Change random function Signed-off-by: Etourneau Gwenn * Remove refresh interval Signed-off-by: Etourneau Gwenn * Remove from config as well Signed-off-by: Etourneau Gwenn * Reformat config_test Removed uneeded error Signed-off-by: Etourneau Gwenn --------- Signed-off-by: Etourneau Gwenn --- discovery/azure/azure.go | 55 +++++++++++++++++++++++++++++++++------- go.mod | 1 + go.sum | 2 ++ 3 files changed, 49 insertions(+), 9 deletions(-) diff --git a/discovery/azure/azure.go b/discovery/azure/azure.go index 23b3cb8c4..675ff7c21 100644 --- a/discovery/azure/azure.go +++ b/discovery/azure/azure.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "math/rand" "net" "net/http" "strings" @@ -30,10 +31,13 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + cache "github.com/Code-Hex/go-generics-cache" + "github.com/Code-Hex/go-generics-cache/policy/lru" "github.com/go-kit/log" 
"github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/model" "github.com/prometheus/common/version" @@ -80,6 +84,11 @@ var ( Name: "prometheus_sd_azure_failures_total", Help: "Number of Azure service discovery refresh failures.", }) + cacheHitCount = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_azure_cache_hit_total", + Help: "Number of cache hit during refresh.", + }) ) var environments = map[string]cloud.Configuration{ @@ -105,6 +114,7 @@ func CloudConfigurationFromName(name string) (cloud.Configuration, error) { func init() { discovery.RegisterConfig(&SDConfig{}) prometheus.MustRegister(failuresCount) + prometheus.MustRegister(cacheHitCount) } // SDConfig is the configuration for Azure based service discovery. @@ -145,7 +155,6 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { if err != nil { return err } - if err = validateAuthParam(c.SubscriptionID, "subscription_id"); err != nil { return err } @@ -174,6 +183,7 @@ type Discovery struct { logger log.Logger cfg *SDConfig port int + cache *cache.Cache[string, *armnetwork.Interface] } // NewDiscovery returns a new AzureDiscovery which periodically refreshes its targets. @@ -181,17 +191,21 @@ func NewDiscovery(cfg *SDConfig, logger log.Logger) *Discovery { if logger == nil { logger = log.NewNopLogger() } + l := cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5000))) d := &Discovery{ cfg: cfg, port: cfg.Port, logger: logger, + cache: l, } + d.Discovery = refresh.NewDiscovery( logger, "azure", time.Duration(cfg.RefreshInterval), d.refresh, ) + return d } @@ -385,15 +399,22 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { // Get the IP address information via separate call to the network provider. 
for _, nicID := range vm.NetworkInterfaces { - networkInterface, err := client.getNetworkInterfaceByID(ctx, nicID) - if err != nil { - if errors.Is(err, errorNotFound) { - level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err) - } else { - ch <- target{labelSet: nil, err: err} + var networkInterface *armnetwork.Interface + if v, ok := d.getFromCache(nicID); ok { + networkInterface = v + cacheHitCount.Add(1) + } else { + networkInterface, err = client.getNetworkInterfaceByID(ctx, nicID) + if err != nil { + if errors.Is(err, errorNotFound) { + level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err) + } else { + ch <- target{labelSet: nil, err: err} + } + // Get out of this routine because we cannot continue without a network interface. + return } - // Get out of this routine because we cannot continue without a network interface. - return + d.addToCache(nicID, networkInterface) } if networkInterface.Properties == nil { @@ -628,3 +649,19 @@ func (client *azureClient) getNetworkInterfaceByID(ctx context.Context, networkI return &resp.Interface, nil } + +// addToCache will add the network interface information for the specified nicID +func (d *Discovery) addToCache(nicID string, netInt *armnetwork.Interface) { + random := rand.Int63n(int64(time.Duration(d.cfg.RefreshInterval * 3).Seconds())) + rs := time.Duration(random) * time.Second + exptime := time.Duration(d.cfg.RefreshInterval*10) + rs + d.cache.Set(nicID, netInt, cache.WithExpiration(exptime)) + level.Debug(d.logger).Log("msg", "Adding nic", "nic", nicID, "time", exptime.Seconds()) +} + +// getFromCache will get the network Interface for the specified nicID +// If the cache is disabled nothing will happen +func (d *Discovery) getFromCache(nicID string) (*armnetwork.Interface, bool) { + net, found := d.cache.Get(nicID) + return net, found +} diff --git a/go.mod b/go.mod index 629dd1147..6cc2d0237 100644 --- a/go.mod +++ b/go.mod @@ 
-110,6 +110,7 @@ require ( ) require ( + github.com/Code-Hex/go-generics-cache v1.3.1 github.com/Microsoft/go-winio v0.6.1 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect diff --git a/go.sum b/go.sum index e8f2013a7..01804516b 100644 --- a/go.sum +++ b/go.sum @@ -54,6 +54,8 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Code-Hex/go-generics-cache v1.3.1 h1:i8rLwyhoyhaerr7JpjtYjJZUcCbWOdiYO3fZXLiEC4g= +github.com/Code-Hex/go-generics-cache v1.3.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= From 2329fba0e59d8c38f940d3c5a3e8954268aa9ec1 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Thu, 23 Nov 2023 20:07:23 +1100 Subject: [PATCH 600/632] Fix linting issues in comments (#13178) Signed-off-by: Charles Korn --- discovery/azure/azure.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/discovery/azure/azure.go b/discovery/azure/azure.go index 675ff7c21..faccadcf8 100644 --- a/discovery/azure/azure.go +++ b/discovery/azure/azure.go @@ -650,7 +650,7 @@ func (client *azureClient) getNetworkInterfaceByID(ctx context.Context, networkI return &resp.Interface, nil } -// addToCache will add the network interface information for the specified nicID +// addToCache will add 
the network interface information for the specified nicID. func (d *Discovery) addToCache(nicID string, netInt *armnetwork.Interface) { random := rand.Int63n(int64(time.Duration(d.cfg.RefreshInterval * 3).Seconds())) rs := time.Duration(random) * time.Second @@ -660,7 +660,7 @@ func (d *Discovery) addToCache(nicID string, netInt *armnetwork.Interface) { } // getFromCache will get the network Interface for the specified nicID -// If the cache is disabled nothing will happen +// If the cache is disabled nothing will happen. func (d *Discovery) getFromCache(nicID string) (*armnetwork.Interface, bool) { net, found := d.cache.Get(nicID) return net, found From 9051100abacce8003a2174a842f3641dc501aec2 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 17 Oct 2023 09:27:46 +0000 Subject: [PATCH 601/632] Scraping: share buffer pool across all scrapes Previously we had one per scrapePool, and one of those per configured scraping job. Each pool holds a few unused buffers, so sharing one across all scrapePools reduces total heap memory. Signed-off-by: Bryan Boreham --- scrape/manager.go | 5 ++++- scrape/scrape.go | 4 +--- scrape/scrape_test.go | 24 +++++++++++++----------- 3 files changed, 18 insertions(+), 15 deletions(-) diff --git a/scrape/manager.go b/scrape/manager.go index a0ac38f6b..3b70e48a1 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -32,6 +32,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/osutil" + "github.com/prometheus/prometheus/util/pool" ) // NewManager is the Manager constructor. 
@@ -57,6 +58,7 @@ func NewManager(o *Options, logger log.Logger, app storage.Appendable, registere graceShut: make(chan struct{}), triggerReload: make(chan struct{}, 1), metrics: sm, + buffers: pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }), } m.metrics.setTargetMetadataCacheGatherer(m) @@ -94,6 +96,7 @@ type Manager struct { scrapeConfigs map[string]*config.ScrapeConfig scrapePools map[string]*scrapePool targetSets map[string][]*targetgroup.Group + buffers *pool.Pool triggerReload chan struct{} @@ -156,7 +159,7 @@ func (m *Manager) reload() { continue } m.metrics.targetScrapePools.Inc() - sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, log.With(m.logger, "scrape_pool", setName), m.opts, m.metrics) + sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, log.With(m.logger, "scrape_pool", setName), m.buffers, m.opts, m.metrics) if err != nil { m.metrics.targetScrapePoolsFailed.Inc() level.Error(m.logger).Log("msg", "error creating new scrape pool", "err", err, "scrape_pool", setName) diff --git a/scrape/scrape.go b/scrape/scrape.go index 983bee837..f3542935a 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -117,7 +117,7 @@ const maxAheadTime = 10 * time.Minute // returning an empty label set is interpreted as "drop". 
type labelsMutator func(labels.Labels) labels.Labels -func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger log.Logger, options *Options, metrics *scrapeMetrics) (*scrapePool, error) { +func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger log.Logger, buffers *pool.Pool, options *Options, metrics *scrapeMetrics) (*scrapePool, error) { if logger == nil { logger = log.NewNopLogger() } @@ -127,8 +127,6 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed return nil, fmt.Errorf("error creating HTTP client: %w", err) } - buffers := pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }) - ctx, cancel := context.WithCancel(context.Background()) sp := &scrapePool{ cancel: cancel, diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 238e90c20..41aca737f 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -49,6 +49,7 @@ import ( "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/util/pool" "github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/testutil" ) @@ -68,7 +69,7 @@ func TestNewScrapePool(t *testing.T) { var ( app = &nopAppendable{} cfg = &config.ScrapeConfig{} - sp, _ = newScrapePool(cfg, app, 0, nil, &Options{}, newTestScrapeMetrics(t)) + sp, _ = newScrapePool(cfg, app, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) ) if a, ok := sp.appendable.(*nopAppendable); !ok || a != app { @@ -104,7 +105,7 @@ func TestDroppedTargetsList(t *testing.T) { }, }, } - sp, _ = newScrapePool(cfg, app, 0, nil, &Options{}, newTestScrapeMetrics(t)) + sp, _ = newScrapePool(cfg, app, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) expectedLabelSetString = "{__address__=\"127.0.0.1:9090\", __scrape_interval__=\"0s\", __scrape_timeout__=\"0s\", job=\"dropMe\"}" 
expectedLength = 2 ) @@ -504,7 +505,7 @@ func TestScrapePoolTargetLimit(t *testing.T) { func TestScrapePoolAppender(t *testing.T) { cfg := &config.ScrapeConfig{} app := &nopAppendable{} - sp, _ := newScrapePool(cfg, app, 0, nil, &Options{}, newTestScrapeMetrics(t)) + sp, _ := newScrapePool(cfg, app, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) loop := sp.newLoop(scrapeLoopOptions{ target: &Target{}, @@ -560,7 +561,7 @@ func TestScrapePoolRaces(t *testing.T) { newConfig := func() *config.ScrapeConfig { return &config.ScrapeConfig{ScrapeInterval: interval, ScrapeTimeout: timeout} } - sp, _ := newScrapePool(newConfig(), &nopAppendable{}, 0, nil, &Options{}, newTestScrapeMetrics(t)) + sp, _ := newScrapePool(newConfig(), &nopAppendable{}, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) tgts := []*targetgroup.Group{ { Targets: []model.LabelSet{ @@ -3277,7 +3278,7 @@ func TestReuseScrapeCache(t *testing.T) { ScrapeInterval: model.Duration(5 * time.Second), MetricsPath: "/metrics", } - sp, _ = newScrapePool(cfg, app, 0, nil, &Options{}, newTestScrapeMetrics(t)) + sp, _ = newScrapePool(cfg, app, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) t1 = &Target{ discoveredLabels: labels.FromStrings("labelNew", "nameNew", "labelNew1", "nameNew1", "labelNew2", "nameNew2"), } @@ -3481,8 +3482,9 @@ func TestReuseCacheRace(t *testing.T) { ScrapeInterval: model.Duration(5 * time.Second), MetricsPath: "/metrics", } - sp, _ = newScrapePool(cfg, app, 0, nil, &Options{}, newTestScrapeMetrics(t)) - t1 = &Target{ + buffers = pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }) + sp, _ = newScrapePool(cfg, app, 0, nil, buffers, &Options{}, newTestScrapeMetrics(t)) + t1 = &Target{ discoveredLabels: labels.FromStrings("labelNew", "nameNew"), } ) @@ -3612,7 +3614,7 @@ func TestScrapeReportLimit(t *testing.T) { })) defer ts.Close() - sp, err := newScrapePool(cfg, s, 0, nil, &Options{}, newTestScrapeMetrics(t)) + sp, err := newScrapePool(cfg, s, 0, nil, nil, 
&Options{}, newTestScrapeMetrics(t)) require.NoError(t, err) defer sp.stop() @@ -3786,7 +3788,7 @@ func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) { }, }, } - sp, _ := newScrapePool(config, &nopAppendable{}, 0, nil, &Options{}, newTestScrapeMetrics(t)) + sp, _ := newScrapePool(config, &nopAppendable{}, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) tgts := []*targetgroup.Group{ { Targets: []model.LabelSet{{model.AddressLabel: "127.0.0.1:9090"}}, @@ -3877,7 +3879,7 @@ test_summary_count 199 })) defer ts.Close() - sp, err := newScrapePool(config, simpleStorage, 0, nil, &Options{}, newTestScrapeMetrics(t)) + sp, err := newScrapePool(config, simpleStorage, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) require.NoError(t, err) defer sp.stop() @@ -4029,7 +4031,7 @@ func TestScrapeLoopCompression(t *testing.T) { EnableCompression: tc.enableCompression, } - sp, err := newScrapePool(config, simpleStorage, 0, nil, &Options{}, newTestScrapeMetrics(t)) + sp, err := newScrapePool(config, simpleStorage, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) require.NoError(t, err) defer sp.stop() From 0102425af102bddb7be64af9abb93af7ba5193ea Mon Sep 17 00:00:00 2001 From: Paulin Todev Date: Thu, 23 Nov 2023 11:24:08 +0000 Subject: [PATCH 602/632] Use only one scrapeMetrics object per test. (#13051) The scrape loop and scrape cache should use the same instance. This brings the tests' behavior more in line with production. 
Signed-off-by: Paulin Todev --- scrape/scrape_test.go | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 238e90c20..5e4f3f30c 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -792,8 +792,9 @@ func TestScrapeLoopRun(t *testing.T) { signal = make(chan struct{}, 1) errc = make(chan error) - scraper = &testScraper{} - app = func(ctx context.Context) storage.Appender { return &nopAppender{} } + scraper = &testScraper{} + app = func(ctx context.Context) storage.Appender { return &nopAppender{} } + scrapeMetrics = newTestScrapeMetrics(t) ) ctx, cancel := context.WithCancel(context.Background()) @@ -817,7 +818,7 @@ func TestScrapeLoopRun(t *testing.T) { false, nil, false, - newTestScrapeMetrics(t), + scrapeMetrics, ) // The loop must terminate during the initial offset if the context @@ -876,7 +877,7 @@ func TestScrapeLoopRun(t *testing.T) { false, nil, false, - newTestScrapeMetrics(t), + scrapeMetrics, ) go func() { @@ -974,9 +975,10 @@ func TestScrapeLoopForcedErr(t *testing.T) { func TestScrapeLoopMetadata(t *testing.T) { var ( - signal = make(chan struct{}) - scraper = &testScraper{} - cache = newScrapeCache(newTestScrapeMetrics(t)) + signal = make(chan struct{}) + scraper = &testScraper{} + scrapeMetrics = newTestScrapeMetrics(t) + cache = newScrapeCache(scrapeMetrics) ) defer close(signal) @@ -1001,7 +1003,7 @@ func TestScrapeLoopMetadata(t *testing.T) { false, nil, false, - newTestScrapeMetrics(t), + scrapeMetrics, ) defer cancel() From 35a15e8f04b3b5716da73c0f4faf1c2be34b79a0 Mon Sep 17 00:00:00 2001 From: Filip Petkovski Date: Thu, 23 Nov 2023 15:09:17 +0100 Subject: [PATCH 603/632] Add benchmark for native histograms (#13160) * Add benchmark for native histograms This commit adds a PromQL benchmark for queries on native histograms. 
Signed-off-by: Filip Petkovski --- promql/bench_test.go | 95 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) diff --git a/promql/bench_test.go b/promql/bench_test.go index 8e443b5a6..13eba3714 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -21,9 +21,11 @@ import ( "testing" "time" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/util/teststorage" ) @@ -269,6 +271,99 @@ func BenchmarkRangeQuery(b *testing.B) { } } +func BenchmarkNativeHistograms(b *testing.B) { + testStorage := teststorage.New(b) + defer testStorage.Close() + + app := testStorage.Appender(context.TODO()) + if err := generateNativeHistogramSeries(app, 3000); err != nil { + b.Fatal(err) + } + if err := app.Commit(); err != nil { + b.Fatal(err) + } + + start := time.Unix(0, 0) + end := start.Add(2 * time.Hour) + step := time.Second * 30 + + cases := []struct { + name string + query string + }{ + { + name: "sum", + query: "sum(native_histogram_series)", + }, + { + name: "sum rate", + query: "sum(rate(native_histogram_series[1m]))", + }, + } + + opts := EngineOpts{ + Logger: nil, + Reg: nil, + MaxSamples: 50000000, + Timeout: 100 * time.Second, + EnableAtModifier: true, + EnableNegativeOffset: true, + } + + b.ResetTimer() + b.ReportAllocs() + + for _, tc := range cases { + b.Run(tc.name, func(b *testing.B) { + ng := NewEngine(opts) + for i := 0; i < b.N; i++ { + qry, err := ng.NewRangeQuery(context.Background(), testStorage, nil, tc.query, start, end, step) + if err != nil { + b.Fatal(err) + } + if result := qry.Exec(context.Background()); result.Err != nil { + b.Fatal(result.Err) + } + } + }) + } +} + +func generateNativeHistogramSeries(app storage.Appender, numSeries int) error { + commonLabels := []string{labels.MetricName, 
"native_histogram_series", "foo", "bar"} + series := make([][]*histogram.Histogram, numSeries) + for i := range series { + series[i] = tsdbutil.GenerateTestHistograms(2000) + } + higherSchemaHist := &histogram.Histogram{ + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -5, Length: 2}, // -5 -4 + {Offset: 2, Length: 3}, // -1 0 1 + {Offset: 2, Length: 2}, // 4 5 + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 3}, + Count: 13, + } + for sid, histograms := range series { + seriesLabels := labels.FromStrings(append(commonLabels, "h", strconv.Itoa(sid))...) + for i := range histograms { + ts := time.Unix(int64(i*15), 0).UnixMilli() + if i == 0 { + // Inject a histogram with a higher schema. + if _, err := app.AppendHistogram(0, seriesLabels, ts, higherSchemaHist, nil); err != nil { + return err + } + } + if _, err := app.AppendHistogram(0, seriesLabels, ts, histograms[i], nil); err != nil { + return err + } + } + } + + return nil +} + func BenchmarkParser(b *testing.B) { cases := []string{ "a", From 59844498f7b12f16c7f004aa951bbb14cdb83991 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Fri, 24 Nov 2023 22:38:38 +1100 Subject: [PATCH 604/632] Fix issue where queries can fail or omit OOO samples if OOO head compaction occurs between creating a querier and reading chunks (#13115) * Add failing test. Signed-off-by: Charles Korn * Don't run OOO head garbage collection while reads are running. Signed-off-by: Charles Korn * Add further test cases for different order of operations. Signed-off-by: Charles Korn * Ensure all queriers are closed if `DB.blockChunkQuerierForRange()` fails. Signed-off-by: Charles Korn * Ensure all queriers are closed if `DB.Querier()` fails. Signed-off-by: Charles Korn * Invert error handling in `DB.Querier()` and `DB.blockChunkQuerierForRange()` to make it clearer Signed-off-by: Charles Korn * Ensure that queries that touch OOO data can't block OOO head garbage collection forever. 
Signed-off-by: Charles Korn * Address PR feedback: fix parameter name in comment Co-authored-by: Jesus Vazquez Signed-off-by: Charles Korn * Address PR feedback: use `lastGarbageCollectedMmapRef` Signed-off-by: Charles Korn * Address PR feedback: ensure pending reads are cleaned up if creating an OOO querier fails Signed-off-by: Charles Korn --------- Signed-off-by: Charles Korn Signed-off-by: Charles Korn Co-authored-by: Jesus Vazquez --- tsdb/db.go | 128 +++++++++++------- tsdb/db_test.go | 259 +++++++++++++++++++++++++++++++++++++ tsdb/head.go | 25 +++- tsdb/ooo_head.go | 18 ++- tsdb/ooo_head_read.go | 44 ++++--- tsdb/ooo_head_read_test.go | 17 ++- tsdb/ooo_isolation.go | 79 +++++++++++ tsdb/ooo_isolation_test.go | 60 +++++++++ tsdb/querier_test.go | 2 +- 9 files changed, 546 insertions(+), 86 deletions(-) create mode 100644 tsdb/ooo_isolation.go create mode 100644 tsdb/ooo_isolation_test.go diff --git a/tsdb/db.go b/tsdb/db.go index c4c05e390..2e3801a9e 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -203,10 +203,14 @@ type DB struct { compactor Compactor blocksToDelete BlocksToDeleteFunc - // Mutex for that must be held when modifying the general block layout. + // Mutex for that must be held when modifying the general block layout or lastGarbageCollectedMmapRef. mtx sync.RWMutex blocks []*Block + // The last OOO chunk that was compacted and written to disk. New queriers must not read chunks less + // than or equal to this reference, as these chunks could be garbage collected at any time. + lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef + head *Head compactc chan struct{} @@ -1243,6 +1247,20 @@ func (db *DB) compactOOOHead(ctx context.Context) error { lastWBLFile, minOOOMmapRef := oooHead.LastWBLFile(), oooHead.LastMmapRef() if lastWBLFile != 0 || minOOOMmapRef != 0 { + if minOOOMmapRef != 0 { + // Ensure that no more queriers are created that will reference chunks we're about to garbage collect. 
+ // truncateOOO waits for any existing queriers that reference chunks we're about to garbage collect to + // complete before running garbage collection, so we don't need to do that here. + // + // We take mtx to ensure that Querier() and ChunkQuerier() don't miss blocks: without this, they could + // capture the list of blocks before the call to reloadBlocks() above runs, but then capture + // lastGarbageCollectedMmapRef after we update it here, and therefore not query either the blocks we've just + // written or the head chunks those blocks were created from. + db.mtx.Lock() + db.lastGarbageCollectedMmapRef = minOOOMmapRef + db.mtx.Unlock() + } + if err := db.head.truncateOOO(lastWBLFile, minOOOMmapRef); err != nil { return errors.Wrap(err, "truncate ooo wbl") } @@ -1869,7 +1887,7 @@ func (db *DB) Snapshot(dir string, withHead bool) error { } // Querier returns a new querier over the data partition for the given time range. -func (db *DB) Querier(mint, maxt int64) (storage.Querier, error) { +func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { var blocks []BlockReader db.mtx.RLock() @@ -1880,11 +1898,23 @@ func (db *DB) Querier(mint, maxt int64) (storage.Querier, error) { blocks = append(blocks, b) } } - var inOrderHeadQuerier storage.Querier + + blockQueriers := make([]storage.Querier, 0, len(blocks)+2) // +2 to allow for possible in-order and OOO head queriers + + defer func() { + if err != nil { + // If we fail, all previously opened queriers must be closed. + for _, q := range blockQueriers { + // TODO(bwplotka): Handle error. 
+ _ = q.Close() + } + } + }() + if maxt >= db.head.MinTime() { rh := NewRangeHead(db.head, mint, maxt) var err error - inOrderHeadQuerier, err = NewBlockQuerier(rh, mint, maxt) + inOrderHeadQuerier, err := NewBlockQuerier(rh, mint, maxt) if err != nil { return nil, errors.Wrapf(err, "open block querier for head %s", rh) } @@ -1906,44 +1936,40 @@ func (db *DB) Querier(mint, maxt int64) (storage.Querier, error) { return nil, errors.Wrapf(err, "open block querier for head while getting new querier %s", rh) } } + + if inOrderHeadQuerier != nil { + blockQueriers = append(blockQueriers, inOrderHeadQuerier) + } } - var outOfOrderHeadQuerier storage.Querier if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { - rh := NewOOORangeHead(db.head, mint, maxt) + rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef) var err error - outOfOrderHeadQuerier, err = NewBlockQuerier(rh, mint, maxt) + outOfOrderHeadQuerier, err := NewBlockQuerier(rh, mint, maxt) if err != nil { + // If NewBlockQuerier() failed, make sure to clean up the pending read created by NewOOORangeHead. + rh.isoState.Close() + return nil, errors.Wrapf(err, "open block querier for ooo head %s", rh) } - } - blockQueriers := make([]storage.Querier, 0, len(blocks)) - for _, b := range blocks { - q, err := NewBlockQuerier(b, mint, maxt) - if err == nil { - blockQueriers = append(blockQueriers, q) - continue - } - // If we fail, all previously opened queriers must be closed. - for _, q := range blockQueriers { - // TODO(bwplotka): Handle error. 
- _ = q.Close() - } - return nil, errors.Wrapf(err, "open querier for block %s", b) - } - if inOrderHeadQuerier != nil { - blockQueriers = append(blockQueriers, inOrderHeadQuerier) - } - if outOfOrderHeadQuerier != nil { blockQueriers = append(blockQueriers, outOfOrderHeadQuerier) } + + for _, b := range blocks { + q, err := NewBlockQuerier(b, mint, maxt) + if err != nil { + return nil, errors.Wrapf(err, "open querier for block %s", b) + } + blockQueriers = append(blockQueriers, q) + } + return storage.NewMergeQuerier(blockQueriers, nil, storage.ChainedSeriesMerge), nil } // blockChunkQuerierForRange returns individual block chunk queriers from the persistent blocks, in-order head block, and the // out-of-order head block, overlapping with the given time range. -func (db *DB) blockChunkQuerierForRange(mint, maxt int64) ([]storage.ChunkQuerier, error) { +func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuerier, err error) { var blocks []BlockReader db.mtx.RLock() @@ -1954,11 +1980,22 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) ([]storage.ChunkQuerie blocks = append(blocks, b) } } - var inOrderHeadQuerier storage.ChunkQuerier + + blockQueriers := make([]storage.ChunkQuerier, 0, len(blocks)+2) // +2 to allow for possible in-order and OOO head queriers + + defer func() { + if err != nil { + // If we fail, all previously opened queriers must be closed. + for _, q := range blockQueriers { + // TODO(bwplotka): Handle error. 
+ _ = q.Close() + } + } + }() + if maxt >= db.head.MinTime() { rh := NewRangeHead(db.head, mint, maxt) - var err error - inOrderHeadQuerier, err = NewBlockChunkQuerier(rh, mint, maxt) + inOrderHeadQuerier, err := NewBlockChunkQuerier(rh, mint, maxt) if err != nil { return nil, errors.Wrapf(err, "open querier for head %s", rh) } @@ -1980,37 +2017,28 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) ([]storage.ChunkQuerie return nil, errors.Wrapf(err, "open querier for head while getting new querier %s", rh) } } + + if inOrderHeadQuerier != nil { + blockQueriers = append(blockQueriers, inOrderHeadQuerier) + } } - var outOfOrderHeadQuerier storage.ChunkQuerier if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { - rh := NewOOORangeHead(db.head, mint, maxt) - var err error - outOfOrderHeadQuerier, err = NewBlockChunkQuerier(rh, mint, maxt) + rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef) + outOfOrderHeadQuerier, err := NewBlockChunkQuerier(rh, mint, maxt) if err != nil { return nil, errors.Wrapf(err, "open block chunk querier for ooo head %s", rh) } + + blockQueriers = append(blockQueriers, outOfOrderHeadQuerier) } - blockQueriers := make([]storage.ChunkQuerier, 0, len(blocks)) for _, b := range blocks { q, err := NewBlockChunkQuerier(b, mint, maxt) - if err == nil { - blockQueriers = append(blockQueriers, q) - continue + if err != nil { + return nil, errors.Wrapf(err, "open querier for block %s", b) } - // If we fail, all previously opened queriers must be closed. - for _, q := range blockQueriers { - // TODO(bwplotka): Handle error. 
- _ = q.Close() - } - return nil, errors.Wrapf(err, "open querier for block %s", b) - } - if inOrderHeadQuerier != nil { - blockQueriers = append(blockQueriers, inOrderHeadQuerier) - } - if outOfOrderHeadQuerier != nil { - blockQueriers = append(blockQueriers, outOfOrderHeadQuerier) + blockQueriers = append(blockQueriers, q) } return blockQueriers, nil diff --git a/tsdb/db_test.go b/tsdb/db_test.go index c7ea068d6..5728b49bd 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -38,6 +38,7 @@ import ( "github.com/prometheus/client_golang/prometheus" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" "github.com/stretchr/testify/require" + "go.uber.org/atomic" "go.uber.org/goleak" "github.com/prometheus/prometheus/config" @@ -3611,6 +3612,264 @@ func testChunkQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChun } } +func TestQuerierShouldNotFailIfOOOCompactionOccursAfterRetrievingQuerier(t *testing.T) { + opts := DefaultOptions() + opts.OutOfOrderTimeWindow = 3 * DefaultBlockDuration + db := openTestDB(t, opts, nil) + defer func() { + require.NoError(t, db.Close()) + }() + + // Disable compactions so we can control it. + db.DisableCompactions() + + metric := labels.FromStrings(labels.MetricName, "test_metric") + ctx := context.Background() + interval := int64(15 * time.Second / time.Millisecond) + ts := int64(0) + samplesWritten := 0 + + // Capture the first timestamp - this will be the timestamp of the OOO sample we'll append below. + oooTS := ts + ts += interval + + // Push samples after the OOO sample we'll write below. + for ; ts < 10*interval; ts += interval { + app := db.Appender(ctx) + _, err := app.Append(0, metric, ts, float64(ts)) + require.NoError(t, err) + require.NoError(t, app.Commit()) + samplesWritten++ + } + + // Push a single OOO sample. 
+ app := db.Appender(ctx) + _, err := app.Append(0, metric, oooTS, float64(ts)) + require.NoError(t, err) + require.NoError(t, app.Commit()) + samplesWritten++ + + // Get a querier. + querierCreatedBeforeCompaction, err := db.ChunkQuerier(0, math.MaxInt64) + require.NoError(t, err) + + // Start OOO head compaction. + compactionComplete := atomic.NewBool(false) + go func() { + defer compactionComplete.Store(true) + + require.NoError(t, db.CompactOOOHead(ctx)) + require.Equal(t, float64(1), prom_testutil.ToFloat64(db.Head().metrics.chunksRemoved)) + }() + + // Give CompactOOOHead time to start work. + // If it does not wait for querierCreatedBeforeCompaction to be closed, then the query will return incorrect results or fail. + time.Sleep(time.Second) + require.False(t, compactionComplete.Load(), "compaction completed before reading chunks or closing querier created before compaction") + + // Get another querier. This one should only use the compacted blocks from disk and ignore the chunks that will be garbage collected. + querierCreatedAfterCompaction, err := db.ChunkQuerier(0, math.MaxInt64) + require.NoError(t, err) + + testQuerier := func(q storage.ChunkQuerier) { + // Query back the series. + hints := &storage.SelectHints{Start: 0, End: math.MaxInt64, Step: interval} + seriesSet := q.Select(ctx, true, hints, labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "test_metric")) + + // Collect the iterator for the series. + var iterators []chunks.Iterator + for seriesSet.Next() { + iterators = append(iterators, seriesSet.At().Iterator(nil)) + } + require.NoError(t, seriesSet.Err()) + require.Len(t, iterators, 1) + iterator := iterators[0] + + // Check that we can still successfully read all samples. 
+ samplesRead := 0 + for iterator.Next() { + samplesRead += iterator.At().Chunk.NumSamples() + } + + require.NoError(t, iterator.Err()) + require.Equal(t, samplesWritten, samplesRead) + } + + testQuerier(querierCreatedBeforeCompaction) + + require.False(t, compactionComplete.Load(), "compaction completed before closing querier created before compaction") + require.NoError(t, querierCreatedBeforeCompaction.Close()) + require.Eventually(t, compactionComplete.Load, time.Second, 10*time.Millisecond, "compaction should complete after querier created before compaction was closed, and not wait for querier created after compaction") + + // Use the querier created after compaction and confirm it returns the expected results (ie. from the disk block created from OOO head and in-order head) without error. + testQuerier(querierCreatedAfterCompaction) + require.NoError(t, querierCreatedAfterCompaction.Close()) +} + +func TestQuerierShouldNotFailIfOOOCompactionOccursAfterSelecting(t *testing.T) { + opts := DefaultOptions() + opts.OutOfOrderTimeWindow = 3 * DefaultBlockDuration + db := openTestDB(t, opts, nil) + defer func() { + require.NoError(t, db.Close()) + }() + + // Disable compactions so we can control it. + db.DisableCompactions() + + metric := labels.FromStrings(labels.MetricName, "test_metric") + ctx := context.Background() + interval := int64(15 * time.Second / time.Millisecond) + ts := int64(0) + samplesWritten := 0 + + // Capture the first timestamp - this will be the timestamp of the OOO sample we'll append below. + oooTS := ts + ts += interval + + // Push samples after the OOO sample we'll write below. + for ; ts < 10*interval; ts += interval { + app := db.Appender(ctx) + _, err := app.Append(0, metric, ts, float64(ts)) + require.NoError(t, err) + require.NoError(t, app.Commit()) + samplesWritten++ + } + + // Push a single OOO sample. 
+ app := db.Appender(ctx) + _, err := app.Append(0, metric, oooTS, float64(ts)) + require.NoError(t, err) + require.NoError(t, app.Commit()) + samplesWritten++ + + // Get a querier. + querier, err := db.ChunkQuerier(0, math.MaxInt64) + require.NoError(t, err) + + // Query back the series. + hints := &storage.SelectHints{Start: 0, End: math.MaxInt64, Step: interval} + seriesSet := querier.Select(ctx, true, hints, labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "test_metric")) + + // Start OOO head compaction. + compactionComplete := atomic.NewBool(false) + go func() { + defer compactionComplete.Store(true) + + require.NoError(t, db.CompactOOOHead(ctx)) + require.Equal(t, float64(1), prom_testutil.ToFloat64(db.Head().metrics.chunksRemoved)) + }() + + // Give CompactOOOHead time to start work. + // If it does not wait for the querier to be closed, then the query will return incorrect results or fail. + time.Sleep(time.Second) + require.False(t, compactionComplete.Load(), "compaction completed before reading chunks or closing querier") + + // Collect the iterator for the series. + var iterators []chunks.Iterator + for seriesSet.Next() { + iterators = append(iterators, seriesSet.At().Iterator(nil)) + } + require.NoError(t, seriesSet.Err()) + require.Len(t, iterators, 1) + iterator := iterators[0] + + // Check that we can still successfully read all samples. 
+ samplesRead := 0 + for iterator.Next() { + samplesRead += iterator.At().Chunk.NumSamples() + } + + require.NoError(t, iterator.Err()) + require.Equal(t, samplesWritten, samplesRead) + + require.False(t, compactionComplete.Load(), "compaction completed before closing querier") + require.NoError(t, querier.Close()) + require.Eventually(t, compactionComplete.Load, time.Second, 10*time.Millisecond, "compaction should complete after querier was closed") +} + +func TestQuerierShouldNotFailIfOOOCompactionOccursAfterRetrievingIterators(t *testing.T) { + opts := DefaultOptions() + opts.OutOfOrderTimeWindow = 3 * DefaultBlockDuration + db := openTestDB(t, opts, nil) + defer func() { + require.NoError(t, db.Close()) + }() + + // Disable compactions so we can control it. + db.DisableCompactions() + + metric := labels.FromStrings(labels.MetricName, "test_metric") + ctx := context.Background() + interval := int64(15 * time.Second / time.Millisecond) + ts := int64(0) + samplesWritten := 0 + + // Capture the first timestamp - this will be the timestamp of the OOO sample we'll append below. + oooTS := ts + ts += interval + + // Push samples after the OOO sample we'll write below. + for ; ts < 10*interval; ts += interval { + app := db.Appender(ctx) + _, err := app.Append(0, metric, ts, float64(ts)) + require.NoError(t, err) + require.NoError(t, app.Commit()) + samplesWritten++ + } + + // Push a single OOO sample. + app := db.Appender(ctx) + _, err := app.Append(0, metric, oooTS, float64(ts)) + require.NoError(t, err) + require.NoError(t, app.Commit()) + samplesWritten++ + + // Get a querier. + querier, err := db.ChunkQuerier(0, math.MaxInt64) + require.NoError(t, err) + + // Query back the series. + hints := &storage.SelectHints{Start: 0, End: math.MaxInt64, Step: interval} + seriesSet := querier.Select(ctx, true, hints, labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "test_metric")) + + // Collect the iterator for the series. 
+ var iterators []chunks.Iterator + for seriesSet.Next() { + iterators = append(iterators, seriesSet.At().Iterator(nil)) + } + require.NoError(t, seriesSet.Err()) + require.Len(t, iterators, 1) + iterator := iterators[0] + + // Start OOO head compaction. + compactionComplete := atomic.NewBool(false) + go func() { + defer compactionComplete.Store(true) + + require.NoError(t, db.CompactOOOHead(ctx)) + require.Equal(t, float64(1), prom_testutil.ToFloat64(db.Head().metrics.chunksRemoved)) + }() + + // Give CompactOOOHead time to start work. + // If it does not wait for the querier to be closed, then the query will return incorrect results or fail. + time.Sleep(time.Second) + require.False(t, compactionComplete.Load(), "compaction completed before reading chunks or closing querier") + + // Check that we can still successfully read all samples. + samplesRead := 0 + for iterator.Next() { + samplesRead += iterator.At().Chunk.NumSamples() + } + + require.NoError(t, iterator.Err()) + require.Equal(t, samplesWritten, samplesRead) + + require.False(t, compactionComplete.Load(), "compaction completed before closing querier") + require.NoError(t, querier.Close()) + require.Eventually(t, compactionComplete.Load, time.Second, 10*time.Millisecond, "compaction should complete after querier was closed") +} + func newTestDB(t *testing.T) *DB { dir := t.TempDir() diff --git a/tsdb/head.go b/tsdb/head.go index 419340506..bf181a415 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -106,6 +106,8 @@ type Head struct { iso *isolation + oooIso *oooIsolation + cardinalityMutex sync.Mutex cardinalityCache *index.PostingsStats // Posting stats cache which will expire after 30sec. lastPostingsStatsCall time.Duration // Last posting stats call (PostingsCardinalityStats()) time for caching. 
@@ -300,6 +302,7 @@ func (h *Head) resetInMemoryState() error { } h.iso = newIsolation(h.opts.IsolationDisabled) + h.oooIso = newOOOIsolation() h.exemplarMetrics = em h.exemplars = es @@ -1133,6 +1136,14 @@ func (h *Head) WaitForPendingReadersInTimeRange(mint, maxt int64) { } } +// WaitForPendingReadersForOOOChunksAtOrBefore is like WaitForPendingReadersInTimeRange, except it waits for +// queries touching OOO chunks less than or equal to chunk to finish querying. +func (h *Head) WaitForPendingReadersForOOOChunksAtOrBefore(chunk chunks.ChunkDiskMapperRef) { + for h.oooIso.HasOpenReadsAtOrBefore(chunk) { + time.Sleep(500 * time.Millisecond) + } +} + // WaitForAppendersOverlapping waits for appends overlapping maxt to finish. func (h *Head) WaitForAppendersOverlapping(maxt int64) { for maxt >= h.iso.lowestAppendTime() { @@ -1271,13 +1282,19 @@ func (h *Head) truncateWAL(mint int64) error { } // truncateOOO +// - waits for any pending reads that potentially touch chunks less than or equal to newMinOOOMmapRef // - truncates the OOO WBL files whose index is strictly less than lastWBLFile. -// - garbage collects all the m-map chunks from the memory that are less than or equal to minOOOMmapRef +// - garbage collects all the m-map chunks from the memory that are less than or equal to newMinOOOMmapRef // and then deletes the series that do not have any data anymore. -func (h *Head) truncateOOO(lastWBLFile int, minOOOMmapRef chunks.ChunkDiskMapperRef) error { +// +// The caller is responsible for ensuring that no further queriers will be created that reference chunks less +// than or equal to newMinOOOMmapRef before calling truncateOOO. 
+func (h *Head) truncateOOO(lastWBLFile int, newMinOOOMmapRef chunks.ChunkDiskMapperRef) error { curMinOOOMmapRef := chunks.ChunkDiskMapperRef(h.minOOOMmapRef.Load()) - if minOOOMmapRef.GreaterThan(curMinOOOMmapRef) { - h.minOOOMmapRef.Store(uint64(minOOOMmapRef)) + if newMinOOOMmapRef.GreaterThan(curMinOOOMmapRef) { + h.WaitForPendingReadersForOOOChunksAtOrBefore(newMinOOOMmapRef) + h.minOOOMmapRef.Store(uint64(newMinOOOMmapRef)) + if err := h.truncateSeriesAndChunkDiskMapper("truncateOOO"); err != nil { return err } diff --git a/tsdb/ooo_head.go b/tsdb/ooo_head.go index 1251af4a9..7f2110fa6 100644 --- a/tsdb/ooo_head.go +++ b/tsdb/ooo_head.go @@ -20,6 +20,7 @@ import ( "github.com/oklog/ulid" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/tombstones" ) @@ -113,22 +114,27 @@ type OOORangeHead struct { // the timerange of the query and having preexisting pointers to the first // and last timestamp help with that. 
mint, maxt int64 + + isoState *oooIsolationState } -func NewOOORangeHead(head *Head, mint, maxt int64) *OOORangeHead { +func NewOOORangeHead(head *Head, mint, maxt int64, minRef chunks.ChunkDiskMapperRef) *OOORangeHead { + isoState := head.oooIso.TrackReadAfter(minRef) + return &OOORangeHead{ - head: head, - mint: mint, - maxt: maxt, + head: head, + mint: mint, + maxt: maxt, + isoState: isoState, } } func (oh *OOORangeHead) Index() (IndexReader, error) { - return NewOOOHeadIndexReader(oh.head, oh.mint, oh.maxt), nil + return NewOOOHeadIndexReader(oh.head, oh.mint, oh.maxt, oh.isoState.minRef), nil } func (oh *OOORangeHead) Chunks() (ChunkReader, error) { - return NewOOOHeadChunkReader(oh.head, oh.mint, oh.maxt), nil + return NewOOOHeadChunkReader(oh.head, oh.mint, oh.maxt, oh.isoState), nil } func (oh *OOORangeHead) Tombstones() (tombstones.Reader, error) { diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index b9c2dc4a5..ace232657 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -38,26 +38,29 @@ var _ IndexReader = &OOOHeadIndexReader{} // decided to do this to avoid code duplication. // The only methods that change are the ones about getting Series and Postings. type OOOHeadIndexReader struct { - *headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementation as possible. + *headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementation as possible. 
+ lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef } -func NewOOOHeadIndexReader(head *Head, mint, maxt int64) *OOOHeadIndexReader { +func NewOOOHeadIndexReader(head *Head, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *OOOHeadIndexReader { hr := &headIndexReader{ head: head, mint: mint, maxt: maxt, } - return &OOOHeadIndexReader{hr} + return &OOOHeadIndexReader{hr, lastGarbageCollectedMmapRef} } func (oh *OOOHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error { - return oh.series(ref, builder, chks, 0) + return oh.series(ref, builder, chks, oh.lastGarbageCollectedMmapRef, 0) } -// The passed lastMmapRef tells upto what max m-map chunk that we can consider. -// If it is 0, it means all chunks need to be considered. -// If it is non-0, then the oooHeadChunk must not be considered. -func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta, lastMmapRef chunks.ChunkDiskMapperRef) error { +// lastGarbageCollectedMmapRef gives the last mmap chunk that may be being garbage collected and so +// any chunk at or before this ref will not be considered. 0 disables this check. +// +// maxMmapRef tells upto what max m-map chunk that we can consider. If it is non-0, then +// the oooHeadChunk will not be considered. +func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef) error { s := oh.head.series.getByID(chunks.HeadSeriesRef(ref)) if s == nil { @@ -112,14 +115,14 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra // so we can set the correct markers. 
if s.ooo.oooHeadChunk != nil { c := s.ooo.oooHeadChunk - if c.OverlapsClosedInterval(oh.mint, oh.maxt) && lastMmapRef == 0 { + if c.OverlapsClosedInterval(oh.mint, oh.maxt) && maxMmapRef == 0 { ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks)))) addChunk(c.minTime, c.maxTime, ref) } } for i := len(s.ooo.oooMmappedChunks) - 1; i >= 0; i-- { c := s.ooo.oooMmappedChunks[i] - if c.OverlapsClosedInterval(oh.mint, oh.maxt) && (lastMmapRef == 0 || lastMmapRef.GreaterThanOrEqualTo(c.ref)) { + if c.OverlapsClosedInterval(oh.mint, oh.maxt) && (maxMmapRef == 0 || maxMmapRef.GreaterThanOrEqualTo(c.ref)) && (lastGarbageCollectedMmapRef == 0 || c.ref.GreaterThan(lastGarbageCollectedMmapRef)) { ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i))) addChunk(c.minTime, c.maxTime, ref) } @@ -232,13 +235,15 @@ func (oh *OOOHeadIndexReader) Postings(ctx context.Context, name string, values type OOOHeadChunkReader struct { head *Head mint, maxt int64 + isoState *oooIsolationState } -func NewOOOHeadChunkReader(head *Head, mint, maxt int64) *OOOHeadChunkReader { +func NewOOOHeadChunkReader(head *Head, mint, maxt int64, isoState *oooIsolationState) *OOOHeadChunkReader { return &OOOHeadChunkReader{ - head: head, - mint: mint, - maxt: maxt, + head: head, + mint: mint, + maxt: maxt, + isoState: isoState, } } @@ -272,6 +277,9 @@ func (cr OOOHeadChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) { } func (cr OOOHeadChunkReader) Close() error { + if cr.isoState != nil { + cr.isoState.Close() + } return nil } @@ -306,7 +314,7 @@ func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead, ch.lastWBLFile = lastWBLFile } - ch.oooIR = NewOOOHeadIndexReader(head, math.MinInt64, math.MaxInt64) + ch.oooIR = NewOOOHeadIndexReader(head, math.MinInt64, math.MaxInt64, 0) n, v := index.AllPostingsKey() // TODO: verify this gets only ooo samples. 
@@ -365,7 +373,7 @@ func (ch *OOOCompactionHead) Index() (IndexReader, error) { } func (ch *OOOCompactionHead) Chunks() (ChunkReader, error) { - return NewOOOHeadChunkReader(ch.oooIR.head, ch.oooIR.mint, ch.oooIR.maxt), nil + return NewOOOHeadChunkReader(ch.oooIR.head, ch.oooIR.mint, ch.oooIR.maxt, nil), nil } func (ch *OOOCompactionHead) Tombstones() (tombstones.Reader, error) { @@ -391,7 +399,7 @@ func (ch *OOOCompactionHead) Meta() BlockMeta { // Only the method of BlockReader interface are valid for the cloned OOOCompactionHead. func (ch *OOOCompactionHead) CloneForTimeRange(mint, maxt int64) *OOOCompactionHead { return &OOOCompactionHead{ - oooIR: NewOOOHeadIndexReader(ch.oooIR.head, mint, maxt), + oooIR: NewOOOHeadIndexReader(ch.oooIR.head, mint, maxt, 0), lastMmapRef: ch.lastMmapRef, postings: ch.postings, chunkRange: ch.chunkRange, @@ -433,7 +441,7 @@ func (ir *OOOCompactionHeadIndexReader) SortedPostings(p index.Postings) index.P } func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error { - return ir.ch.oooIR.series(ref, builder, chks, ir.ch.lastMmapRef) + return ir.ch.oooIR.series(ref, builder, chks, 0, ir.ch.lastMmapRef) } func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index e74a7f9de..3f4b9bae7 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -356,7 +356,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { }) } - ir := NewOOOHeadIndexReader(h, tc.queryMinT, tc.queryMaxT) + ir := NewOOOHeadIndexReader(h, tc.queryMinT, tc.queryMaxT, 0) var chks []chunks.Meta var b labels.ScratchBuilder @@ -437,7 +437,7 @@ func TestOOOHeadChunkReader_LabelValues(t *testing.T) { for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { // We first want to test using a head index reader that covers the biggest 
query interval - oh := NewOOOHeadIndexReader(head, tc.queryMinT, tc.queryMaxT) + oh := NewOOOHeadIndexReader(head, tc.queryMinT, tc.queryMaxT, 0) matchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")} values, err := oh.LabelValues(ctx, "foo", matchers...) sort.Strings(values) @@ -484,7 +484,8 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { t.Run("Getting a non existing chunk fails with not found error", func(t *testing.T) { db := newTestDBWithOpts(t, opts) - cr := NewOOOHeadChunkReader(db.head, 0, 1000) + cr := NewOOOHeadChunkReader(db.head, 0, 1000, nil) + defer cr.Close() c, err := cr.Chunk(chunks.Meta{ Ref: 0x1000000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300, }) @@ -842,14 +843,15 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { // The Series method is the one that populates the chunk meta OOO // markers like OOOLastRef. These are then used by the ChunkReader. - ir := NewOOOHeadIndexReader(db.head, tc.queryMinT, tc.queryMaxT) + ir := NewOOOHeadIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0) var chks []chunks.Meta var b labels.ScratchBuilder err := ir.Series(s1Ref, &b, &chks) require.NoError(t, err) require.Equal(t, len(tc.expChunksSamples), len(chks)) - cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT) + cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil) + defer cr.Close() for i := 0; i < len(chks); i++ { c, err := cr.Chunk(chks[i]) require.NoError(t, err) @@ -1005,7 +1007,7 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( // The Series method is the one that populates the chunk meta OOO // markers like OOOLastRef. These are then used by the ChunkReader. 
- ir := NewOOOHeadIndexReader(db.head, tc.queryMinT, tc.queryMaxT) + ir := NewOOOHeadIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0) var chks []chunks.Meta var b labels.ScratchBuilder err := ir.Series(s1Ref, &b, &chks) @@ -1020,7 +1022,8 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( } require.NoError(t, app.Commit()) - cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT) + cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil) + defer cr.Close() for i := 0; i < len(chks); i++ { c, err := cr.Chunk(chks[i]) require.NoError(t, err) diff --git a/tsdb/ooo_isolation.go b/tsdb/ooo_isolation.go new file mode 100644 index 000000000..3e3e165a0 --- /dev/null +++ b/tsdb/ooo_isolation.go @@ -0,0 +1,79 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tsdb + +import ( + "container/list" + "sync" + + "github.com/prometheus/prometheus/tsdb/chunks" +) + +type oooIsolation struct { + mtx sync.RWMutex + openReads *list.List +} + +type oooIsolationState struct { + i *oooIsolation + e *list.Element + + minRef chunks.ChunkDiskMapperRef +} + +func newOOOIsolation() *oooIsolation { + return &oooIsolation{ + openReads: list.New(), + } +} + +// HasOpenReadsAtOrBefore returns true if this oooIsolation is aware of any reads that use +// chunks with reference at or before ref. 
+func (i *oooIsolation) HasOpenReadsAtOrBefore(ref chunks.ChunkDiskMapperRef) bool { + i.mtx.RLock() + defer i.mtx.RUnlock() + + for e := i.openReads.Front(); e != nil; e = e.Next() { + s := e.Value.(*oooIsolationState) + + if ref.GreaterThan(s.minRef) { + return true + } + } + + return false +} + +// TrackReadAfter records a read that uses chunks with reference after minRef. +// +// The caller must ensure that the returned oooIsolationState is eventually closed when +// the read is complete. +func (i *oooIsolation) TrackReadAfter(minRef chunks.ChunkDiskMapperRef) *oooIsolationState { + s := &oooIsolationState{ + i: i, + minRef: minRef, + } + + i.mtx.Lock() + s.e = i.openReads.PushBack(s) + i.mtx.Unlock() + + return s +} + +func (s oooIsolationState) Close() { + s.i.mtx.Lock() + s.i.openReads.Remove(s.e) + s.i.mtx.Unlock() +} diff --git a/tsdb/ooo_isolation_test.go b/tsdb/ooo_isolation_test.go new file mode 100644 index 000000000..4ff0488ab --- /dev/null +++ b/tsdb/ooo_isolation_test.go @@ -0,0 +1,60 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tsdb + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestOOOIsolation(t *testing.T) { + i := newOOOIsolation() + + // Empty state shouldn't have any open reads. 
+ require.False(t, i.HasOpenReadsAtOrBefore(0)) + require.False(t, i.HasOpenReadsAtOrBefore(1)) + require.False(t, i.HasOpenReadsAtOrBefore(2)) + require.False(t, i.HasOpenReadsAtOrBefore(3)) + + // Add a read. + read1 := i.TrackReadAfter(1) + require.False(t, i.HasOpenReadsAtOrBefore(0)) + require.False(t, i.HasOpenReadsAtOrBefore(1)) + require.True(t, i.HasOpenReadsAtOrBefore(2)) + + // Add another overlapping read. + read2 := i.TrackReadAfter(0) + require.False(t, i.HasOpenReadsAtOrBefore(0)) + require.True(t, i.HasOpenReadsAtOrBefore(1)) + require.True(t, i.HasOpenReadsAtOrBefore(2)) + + // Close the second read, should now only report open reads for the first read's ref. + read2.Close() + require.False(t, i.HasOpenReadsAtOrBefore(0)) + require.False(t, i.HasOpenReadsAtOrBefore(1)) + require.True(t, i.HasOpenReadsAtOrBefore(2)) + + // Close the second read again: this should do nothing and ensures we can safely call Close() multiple times. + read2.Close() + require.False(t, i.HasOpenReadsAtOrBefore(0)) + require.False(t, i.HasOpenReadsAtOrBefore(1)) + require.True(t, i.HasOpenReadsAtOrBefore(2)) + + // Closing the first read should indicate no further open reads. 
+ read1.Close() + require.False(t, i.HasOpenReadsAtOrBefore(0)) + require.False(t, i.HasOpenReadsAtOrBefore(1)) + require.False(t, i.HasOpenReadsAtOrBefore(2)) +} diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 3c27ab2f3..7260d9d8b 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -2803,7 +2803,7 @@ func BenchmarkQueries(b *testing.B) { qHead, err := NewBlockQuerier(NewRangeHead(head, 1, nSamples), 1, nSamples) require.NoError(b, err) - qOOOHead, err := NewBlockQuerier(NewOOORangeHead(head, 1, nSamples), 1, nSamples) + qOOOHead, err := NewBlockQuerier(NewOOORangeHead(head, 1, nSamples, 0), 1, nSamples) require.NoError(b, err) queryTypes = append(queryTypes, qt{ From f0e1b592ab10ea89a56dcabdbbf91f5d7e91cc40 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 24 Nov 2023 14:38:35 +0000 Subject: [PATCH 605/632] Scraping: use slices.sort for exemplars The sort implementation using Go generics is used everywhere else in Prometheus. Signed-off-by: Bryan Boreham --- model/exemplar/exemplar.go | 15 +++++++++++++++ scrape/scrape.go | 14 ++------------ 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/model/exemplar/exemplar.go b/model/exemplar/exemplar.go index 2e39cf689..08f55374e 100644 --- a/model/exemplar/exemplar.go +++ b/model/exemplar/exemplar.go @@ -48,3 +48,18 @@ func (e Exemplar) Equals(e2 Exemplar) bool { return e.Value == e2.Value } + +// Sort first by timestamp, then value, then labels. 
+func Compare(a, b Exemplar) int { + if a.Ts < b.Ts { + return -1 + } else if a.Ts > b.Ts { + return 1 + } + if a.Value < b.Value { + return -1 + } else if a.Value > b.Value { + return 1 + } + return labels.Compare(a.Labels, b.Labels) +} diff --git a/scrape/scrape.go b/scrape/scrape.go index 983bee837..590eac889 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -24,7 +24,6 @@ import ( "math" "net/http" "reflect" - "sort" "strconv" "strings" "sync" @@ -1610,17 +1609,8 @@ loop: exemplars = append(exemplars, e) e = exemplar.Exemplar{} // Reset for next time round loop. } - sort.Slice(exemplars, func(i, j int) bool { - // Sort first by timestamp, then value, then labels so the checking - // for duplicates / out of order is more efficient during validation. - if exemplars[i].Ts != exemplars[j].Ts { - return exemplars[i].Ts < exemplars[j].Ts - } - if exemplars[i].Value != exemplars[j].Value { - return exemplars[i].Value < exemplars[j].Value - } - return exemplars[i].Labels.Hash() < exemplars[j].Labels.Hash() - }) + // Sort so that checking for duplicates / out of order is more efficient during validation. + slices.SortFunc(exemplars, exemplar.Compare) outOfOrderExemplars := 0 for _, e := range exemplars { _, exemplarErr := app.AppendExemplar(ref, lset, e) From 3e287e01700f48959fcd19f7e8b2de6a16d83726 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 16 Oct 2023 13:47:10 +0000 Subject: [PATCH 606/632] Scraping tests: refactor scrapeLoop creation Pull boilerplate code into a function. Where appropriate we set some config on the returned object. 
Signed-off-by: Bryan Boreham --- scrape/scrape_test.go | 818 ++++++------------------------------------ 1 file changed, 100 insertions(+), 718 deletions(-) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 5e4f3f30c..15be3bcfa 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -640,22 +640,22 @@ func TestScrapePoolScrapeLoopsStarted(t *testing.T) { } } -func TestScrapeLoopStopBeforeRun(t *testing.T) { - scraper := &testScraper{} - - sl := newScrapeLoop(context.Background(), +func newBasicScrapeLoop(t testing.TB, ctx context.Context, scraper scraper, app func(ctx context.Context) storage.Appender, interval time.Duration) *scrapeLoop { + return newScrapeLoop(ctx, scraper, nil, nil, nopMutator, nopMutator, - nil, nil, 0, + app, + nil, + 0, true, false, true, 0, 0, nil, - 1, - 0, + interval, + time.Hour, false, false, false, @@ -663,6 +663,11 @@ func TestScrapeLoopStopBeforeRun(t *testing.T) { false, newTestScrapeMetrics(t), ) +} + +func TestScrapeLoopStopBeforeRun(t *testing.T) { + scraper := &testScraper{} + sl := newBasicScrapeLoop(t, context.Background(), scraper, nil, 1) // The scrape pool synchronizes on stopping scrape loops. However, new scrape // loops are started asynchronously. Thus it's possible, that a loop is stopped @@ -717,28 +722,7 @@ func TestScrapeLoopStop(t *testing.T) { app = func(ctx context.Context) storage.Appender { return appender } ) - sl := newScrapeLoop(context.Background(), - scraper, - nil, nil, - nopMutator, - nopMutator, - app, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - 10*time.Millisecond, - time.Hour, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, context.Background(), scraper, app, 10*time.Millisecond) // Terminate loop after 2 scrapes. 
numScrapes := 0 @@ -857,28 +841,8 @@ func TestScrapeLoopRun(t *testing.T) { } ctx, cancel = context.WithCancel(context.Background()) - sl = newScrapeLoop(ctx, - scraper, - nil, nil, - nopMutator, - nopMutator, - app, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - time.Second, - 100*time.Millisecond, - false, - false, - false, - nil, - false, - scrapeMetrics, - ) + sl = newBasicScrapeLoop(t, ctx, scraper, app, time.Second) + sl.timeout = 100 * time.Millisecond go func() { sl.run(errc) @@ -920,28 +884,7 @@ func TestScrapeLoopForcedErr(t *testing.T) { ) ctx, cancel := context.WithCancel(context.Background()) - sl := newScrapeLoop(ctx, - scraper, - nil, nil, - nopMutator, - nopMutator, - app, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - time.Second, - time.Hour, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, ctx, scraper, app, time.Second) forcedErr := fmt.Errorf("forced err") sl.setForcedError(forcedErr) @@ -1044,28 +987,7 @@ func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) { t.Cleanup(func() { s.Close() }) ctx, cancel := context.WithCancel(context.Background()) - sl := newScrapeLoop(ctx, - &testScraper{}, - nil, nil, - nopMutator, - nopMutator, - s.Appender, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - 0, - 0, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, ctx, &testScraper{}, s.Appender, 0) t.Cleanup(func() { cancel() }) return ctx, sl @@ -1106,30 +1028,10 @@ func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) { Separator: ";", Replacement: "$1", }} - sl := newScrapeLoop(ctx, - &testScraper{}, - nil, nil, - func(l labels.Labels) labels.Labels { - return mutateSampleLabels(l, target, true, relabelConfig) - }, - nopMutator, - s.Appender, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - 0, - 0, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := 
newBasicScrapeLoop(t, ctx, &testScraper{}, s.Appender, 0) + sl.sampleMutator = func(l labels.Labels) labels.Labels { + return mutateSampleLabels(l, target, true, relabelConfig) + } slApp := sl.appender(ctx) total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "", time.Time{}) @@ -1190,28 +1092,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) { ) ctx, cancel := context.WithCancel(context.Background()) - sl := newScrapeLoop(ctx, - scraper, - nil, nil, - nopMutator, - nopMutator, - app, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - 10*time.Millisecond, - time.Hour, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) // Succeed once, several failures, then stop. numScrapes := 0 @@ -1257,28 +1138,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) { ) ctx, cancel := context.WithCancel(context.Background()) - sl := newScrapeLoop(ctx, - scraper, - nil, nil, - nopMutator, - nopMutator, - app, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - 10*time.Millisecond, - time.Hour, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) // Succeed once, several failures, then stop. 
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { @@ -1327,28 +1187,7 @@ func TestScrapeLoopCache(t *testing.T) { ) ctx, cancel := context.WithCancel(context.Background()) - sl := newScrapeLoop(ctx, - scraper, - nil, nil, - nopMutator, - nopMutator, - app, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - 10*time.Millisecond, - time.Hour, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) numScrapes := 0 @@ -1414,28 +1253,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) { ) ctx, cancel := context.WithCancel(context.Background()) - sl := newScrapeLoop(ctx, - scraper, - nil, nil, - nopMutator, - nopMutator, - app, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - 10*time.Millisecond, - time.Hour, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) numScrapes := 0 @@ -1529,31 +1347,13 @@ func TestScrapeLoopAppend(t *testing.T) { labels: labels.FromStrings(test.discoveryLabels...), } - sl := newScrapeLoop(context.Background(), - nil, nil, nil, - func(l labels.Labels) labels.Labels { - return mutateSampleLabels(l, discoveryLabels, test.honorLabels, nil) - }, - func(l labels.Labels) labels.Labels { - return mutateReportSampleLabels(l, discoveryLabels) - }, - func(ctx context.Context) storage.Appender { return app }, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - 0, - 0, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl.sampleMutator = func(l labels.Labels) labels.Labels { + return mutateSampleLabels(l, discoveryLabels, test.honorLabels, nil) + } + sl.reportSampleMutator = func(l labels.Labels) labels.Labels { + return mutateReportSampleLabels(l, discoveryLabels) + } now := time.Now() @@ 
-1635,14 +1435,10 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) { for name, tc := range testcases { t.Run(name, func(t *testing.T) { app := &collectResultAppender{} - sl := newScrapeLoop(context.Background(), nil, nil, nil, - func(l labels.Labels) labels.Labels { - return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil) - }, - nil, - func(ctx context.Context) storage.Appender { return app }, - nil, 0, true, false, true, 0, 0, nil, 0, 0, false, false, false, nil, false, newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl.sampleMutator = func(l labels.Labels) labels.Labels { + return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil) + } slApp := sl.appender(context.Background()) _, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC)) require.NoError(t, err) @@ -1663,28 +1459,7 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) { func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) { // collectResultAppender's AddFast always returns ErrNotFound if we don't give it a next. 
app := &collectResultAppender{} - - sl := newScrapeLoop(context.Background(), - nil, nil, nil, - nopMutator, - nopMutator, - func(ctx context.Context) storage.Appender { return app }, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - 0, - 0, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) fakeRef := storage.SeriesRef(1) expValue := float64(1) @@ -1721,32 +1496,14 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) { resApp := &collectResultAppender{} app := &limitAppender{Appender: resApp, limit: 1} - sl := newScrapeLoop(context.Background(), - nil, nil, nil, - func(l labels.Labels) labels.Labels { - if l.Has("deleteme") { - return labels.EmptyLabels() - } - return l - }, - nopMutator, - func(ctx context.Context) storage.Appender { return app }, - nil, - 0, - true, - false, - true, - app.limit, 0, - nil, - 0, - 0, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl.sampleMutator = func(l labels.Labels) labels.Labels { + if l.Has("deleteme") { + return labels.EmptyLabels() + } + return l + } + sl.sampleLimit = app.limit // Get the value of the Counter before performing the append. 
beforeMetric := dto.Metric{} @@ -1802,32 +1559,14 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) { resApp := &collectResultAppender{} app := &bucketLimitAppender{Appender: resApp, limit: 2} - sl := newScrapeLoop(context.Background(), - nil, nil, nil, - func(l labels.Labels) labels.Labels { - if l.Has("deleteme") { - return labels.EmptyLabels() - } - return l - }, - nopMutator, - func(ctx context.Context) storage.Appender { return app }, - nil, - 0, - true, - false, - true, - app.limit, 0, - nil, - 0, - 0, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl.sampleMutator = func(l labels.Labels) labels.Labels { + if l.Has("deleteme") { + return labels.EmptyLabels() + } + return l + } + sl.sampleLimit = app.limit metric := dto.Metric{} err := sl.metrics.targetScrapeNativeHistogramBucketLimit.Write(&metric) @@ -1908,28 +1647,7 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) { defer s.Close() capp := &collectResultAppender{} - - sl := newScrapeLoop(context.Background(), - nil, nil, nil, - nopMutator, - nopMutator, - func(ctx context.Context) storage.Appender { capp.next = s.Appender(ctx); return capp }, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - 0, - 0, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return capp }, 0) now := time.Now() slApp := sl.appender(context.Background()) @@ -1961,27 +1679,7 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) { func TestScrapeLoopAppendStaleness(t *testing.T) { app := &collectResultAppender{} - sl := newScrapeLoop(context.Background(), - nil, nil, nil, - nopMutator, - nopMutator, - func(ctx context.Context) storage.Appender { return app }, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - 0, - 0, - false, - false, - 
false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) now := time.Now() slApp := sl.appender(context.Background()) @@ -2016,28 +1714,7 @@ func TestScrapeLoopAppendStaleness(t *testing.T) { func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) { app := &collectResultAppender{} - sl := newScrapeLoop(context.Background(), - nil, nil, nil, - nopMutator, - nopMutator, - func(ctx context.Context) storage.Appender { return app }, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - 0, - 0, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) - + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) now := time.Now() slApp := sl.appender(context.Background()) _, _, _, err := sl.append(slApp, []byte("metric_a 1 1000\n"), "", now) @@ -2061,27 +1738,8 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) { func TestScrapeLoopAppendStalenessIfTrackTimestampStaleness(t *testing.T) { app := &collectResultAppender{} - sl := newScrapeLoop(context.Background(), - nil, nil, nil, - nopMutator, - nopMutator, - func(ctx context.Context) storage.Appender { return app }, - nil, - 0, - true, - true, - true, - 0, 0, - nil, - 0, - 0, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl.trackTimestampsStaleness = true now := time.Now() slApp := sl.appender(context.Background()) @@ -2430,31 +2088,14 @@ metric: < labels: labels.FromStrings(test.discoveryLabels...), } - sl := newScrapeLoop(context.Background(), - nil, nil, nil, - func(l labels.Labels) labels.Labels { - return mutateSampleLabels(l, discoveryLabels, false, nil) - }, - func(l labels.Labels) labels.Labels { - return mutateReportSampleLabels(l, discoveryLabels) - }, - 
func(ctx context.Context) storage.Appender { return app }, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - 0, - 0, - test.scrapeClassicHistograms, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl.sampleMutator = func(l labels.Labels) labels.Labels { + return mutateSampleLabels(l, discoveryLabels, false, nil) + } + sl.reportSampleMutator = func(l labels.Labels) labels.Labels { + return mutateReportSampleLabels(l, discoveryLabels) + } + sl.scrapeClassicHistograms = test.scrapeClassicHistograms now := time.Now() @@ -2521,31 +2162,13 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) { app := &collectResultAppender{} - sl := newScrapeLoop(context.Background(), - nil, nil, nil, - func(l labels.Labels) labels.Labels { - return mutateSampleLabels(l, discoveryLabels, false, nil) - }, - func(l labels.Labels) labels.Labels { - return mutateReportSampleLabels(l, discoveryLabels) - }, - func(ctx context.Context) storage.Appender { return app }, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - 0, - 0, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl.sampleMutator = func(l labels.Labels) labels.Labels { + return mutateSampleLabels(l, discoveryLabels, false, nil) + } + sl.reportSampleMutator = func(l labels.Labels) labels.Labels { + return mutateReportSampleLabels(l, discoveryLabels) + } now := time.Now() @@ -2580,28 +2203,7 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) { ) ctx, cancel := context.WithCancel(context.Background()) - sl := newScrapeLoop(ctx, - scraper, - nil, nil, - nopMutator, - nopMutator, - app, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - 10*time.Millisecond, - time.Hour, - false, - false, - false, - nil, - false, - 
newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { cancel() @@ -2620,28 +2222,7 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) { ) ctx, cancel := context.WithCancel(context.Background()) - sl := newScrapeLoop(ctx, - scraper, - nil, nil, - nopMutator, - nopMutator, - app, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - 10*time.Millisecond, - time.Hour, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { cancel() @@ -2672,29 +2253,7 @@ func (app *errorAppender) Append(ref storage.SeriesRef, lset labels.Labels, t in func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T) { app := &errorAppender{} - - sl := newScrapeLoop(context.Background(), - nil, - nil, nil, - nopMutator, - nopMutator, - func(ctx context.Context) storage.Appender { return app }, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - 0, - 0, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) now := time.Unix(1, 0) slApp := sl.appender(context.Background()) @@ -2717,32 +2276,14 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) { app := &collectResultAppender{} - sl := newScrapeLoop(context.Background(), - nil, - nil, nil, - nopMutator, - nopMutator, + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return &timeLimitAppender{ Appender: app, maxTime: timestamp.FromTime(time.Now().Add(10 * time.Minute)), } }, - nil, 0, - true, - false, - true, - 0, 0, - nil, - 0, - 0, - false, - false, - false, - nil, - 
false, - newTestScrapeMetrics(t), ) now := time.Now().Add(20 * time.Minute) @@ -3014,29 +2555,8 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) { defer s.Close() app := s.Appender(context.Background()) - capp := &collectResultAppender{next: app} - - sl := newScrapeLoop(context.Background(), - nil, nil, nil, - nopMutator, - nopMutator, - func(ctx context.Context) storage.Appender { return capp }, - nil, 0, - true, - false, - true, - 0, 0, - nil, - 0, - 0, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return capp }, 0) now := time.Now() slApp := sl.appender(context.Background()) @@ -3062,26 +2582,8 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) { capp := &collectResultAppender{next: app} - sl := newScrapeLoop(context.Background(), - nil, nil, nil, - nopMutator, - nopMutator, - func(ctx context.Context) storage.Appender { return capp }, - nil, 0, - false, - false, - true, - 0, 0, - nil, - 0, - 0, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return capp }, 0) + sl.honorTimestamps = false now := time.Now() slApp := sl.appender(context.Background()) @@ -3104,28 +2606,7 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) { defer s.Close() ctx, cancel := context.WithCancel(context.Background()) - sl := newScrapeLoop(ctx, - &testScraper{}, - nil, nil, - nopMutator, - nopMutator, - s.Appender, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - 0, - 0, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, ctx, &testScraper{}, s.Appender, 0) defer cancel() // We add a good and a bad metric to check that both are discarded. 
@@ -3161,33 +2642,13 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) { app := s.Appender(context.Background()) ctx, cancel := context.WithCancel(context.Background()) - sl := newScrapeLoop(context.Background(), - &testScraper{}, - nil, nil, - func(l labels.Labels) labels.Labels { - if l.Has("drop") { - return labels.FromStrings("no", "name") // This label set will trigger an error. - } - return l - }, - nopMutator, - func(ctx context.Context) storage.Appender { return app }, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - 0, - 0, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, context.Background(), &testScraper{}, func(ctx context.Context) storage.Appender { return app }, 0) + sl.sampleMutator = func(l labels.Labels) labels.Labels { + if l.Has("drop") { + return labels.FromStrings("no", "name") // This label set will trigger an error. + } + return l + } defer cancel() slApp := sl.appender(context.Background()) @@ -3433,28 +2894,7 @@ func TestScrapeAddFast(t *testing.T) { defer s.Close() ctx, cancel := context.WithCancel(context.Background()) - sl := newScrapeLoop(ctx, - &testScraper{}, - nil, nil, - nopMutator, - nopMutator, - s.Appender, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - 0, - 0, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, ctx, &testScraper{}, s.Appender, 0) defer cancel() slApp := sl.appender(ctx) @@ -3523,28 +2963,7 @@ func TestScrapeReportSingleAppender(t *testing.T) { ) ctx, cancel := context.WithCancel(context.Background()) - sl := newScrapeLoop(ctx, - scraper, - nil, nil, - nopMutator, - nopMutator, - s.Appender, - nil, - 0, - true, - false, - true, - 0, 0, - nil, - 10*time.Millisecond, - time.Hour, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, ctx, scraper, s.Appender, 10*time.Millisecond) numScrapes := 0 @@ -3726,31 +3145,14 @@ func 
TestScrapeLoopLabelLimit(t *testing.T) { labels: labels.FromStrings(test.discoveryLabels...), } - sl := newScrapeLoop(context.Background(), - nil, nil, nil, - func(l labels.Labels) labels.Labels { - return mutateSampleLabels(l, discoveryLabels, false, nil) - }, - func(l labels.Labels) labels.Labels { - return mutateReportSampleLabels(l, discoveryLabels) - }, - func(ctx context.Context) storage.Appender { return app }, - nil, - 0, - true, - false, - true, - 0, 0, - &test.labelLimits, - 0, - 0, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl.sampleMutator = func(l labels.Labels) labels.Labels { + return mutateSampleLabels(l, discoveryLabels, false, nil) + } + sl.reportSampleMutator = func(l labels.Labels) labels.Labels { + return mutateReportSampleLabels(l, discoveryLabels) + } + sl.labelLimits = &test.labelLimits slApp := sl.appender(context.Background()) _, _, _, err := sl.append(slApp, []byte(test.scrapeLabels), "", time.Now()) @@ -3936,28 +3338,8 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t * ) ctx, cancel := context.WithCancel(context.Background()) - sl := newScrapeLoop(ctx, - scraper, - nil, nil, - nopMutator, - nopMutator, - app, - nil, - 0, - true, - true, - true, - 0, 0, - nil, - 10*time.Millisecond, - time.Hour, - false, - false, - false, - nil, - false, - newTestScrapeMetrics(t), - ) + sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) + sl.trackTimestampsStaleness = true // Succeed once, several failures, then stop. 
numScrapes := 0 From 2e205ee95c121d8d6da0d8984f0b3bc599acaa2a Mon Sep 17 00:00:00 2001 From: Yury Molodov Date: Fri, 24 Nov 2023 22:44:48 +0100 Subject: [PATCH 607/632] ui: heatmap visualization for histogram buckets (#13096) ui: heatmap visualization for histogram buckets Signed-off-by: Yury Moladau --------- Signed-off-by: Yury Moladau --- .../react-app/src/pages/graph/Graph.test.tsx | 19 +- web/ui/react-app/src/pages/graph/Graph.tsx | 48 +++-- .../src/pages/graph/GraphControls.test.tsx | 13 +- .../src/pages/graph/GraphControls.tsx | 30 ++- .../src/pages/graph/GraphHeatmapHelpers.ts | 56 +++++ .../src/pages/graph/GraphHelpers.test.ts | 1 + .../react-app/src/pages/graph/GraphHelpers.ts | 62 +++--- .../src/pages/graph/GraphTabContent.tsx | 7 +- .../react-app/src/pages/graph/Panel.test.tsx | 10 +- web/ui/react-app/src/pages/graph/Panel.tsx | 30 ++- web/ui/react-app/src/types/index.d.ts | 1 + web/ui/react-app/src/utils/index.ts | 16 +- web/ui/react-app/src/utils/utils.test.ts | 17 +- .../src/vendor/flot/jquery.flot.heatmap.js | 195 ++++++++++++++++++ 14 files changed, 413 insertions(+), 92 deletions(-) create mode 100644 web/ui/react-app/src/pages/graph/GraphHeatmapHelpers.ts create mode 100644 web/ui/react-app/src/vendor/flot/jquery.flot.heatmap.js diff --git a/web/ui/react-app/src/pages/graph/Graph.test.tsx b/web/ui/react-app/src/pages/graph/Graph.test.tsx index 7b226792c..01625f7c2 100644 --- a/web/ui/react-app/src/pages/graph/Graph.test.tsx +++ b/web/ui/react-app/src/pages/graph/Graph.test.tsx @@ -4,6 +4,7 @@ import { shallow, mount } from 'enzyme'; import Graph from './Graph'; import ReactResizeDetector from 'react-resize-detector'; import { Legend } from './Legend'; +import { GraphDisplayMode } from './Panel'; describe('Graph', () => { beforeAll(() => { @@ -30,7 +31,7 @@ describe('Graph', () => { endTime: 1572130692, resolution: 28, }, - stacked: false, + displayMode: GraphDisplayMode.Stacked, data: { resultType: 'matrix', result: [ @@ -115,7 +116,7 @@ 
describe('Graph', () => { graph = mount( { ); }); it('should trigger state update when stacked prop is changed', () => { - graph.setProps({ stacked: false }); + graph.setProps({ displayMode: GraphDisplayMode.Lines }); expect(spyState).toHaveBeenCalledWith( { chartData: { @@ -177,7 +178,7 @@ describe('Graph', () => { const graph = mount( { const graph = shallow( { const graph = mount( { const graph = mount( { const graph = mount( { const graph: any = mount( ; }; exemplars: ExemplarData; - stacked: boolean; + displayMode: GraphDisplayMode; useLocalTime: boolean; showExemplars: boolean; handleTimeRangeSelection: (startTime: number, endTime: number) => void; @@ -69,11 +71,11 @@ class Graph extends PureComponent { }; componentDidUpdate(prevProps: GraphProps): void { - const { data, stacked, useLocalTime, showExemplars } = this.props; + const { data, displayMode, useLocalTime, showExemplars } = this.props; if (prevProps.data !== data) { this.selectedSeriesIndexes = []; this.setState({ chartData: normalizeData(this.props) }, this.plot); - } else if (prevProps.stacked !== stacked) { + } else if (prevProps.displayMode !== displayMode) { this.setState({ chartData: normalizeData(this.props) }, () => { if (this.selectedSeriesIndexes.length === 0) { this.plot(); @@ -143,7 +145,18 @@ class Graph extends PureComponent { } this.destroyPlot(); - this.$chart = $.plot($(this.chartRef.current), data, getOptions(this.props.stacked, this.props.useLocalTime)); + const options = getOptions(this.props.displayMode === GraphDisplayMode.Stacked, this.props.useLocalTime); + const isHeatmap = this.props.displayMode === GraphDisplayMode.Heatmap; + options.series.heatmap = isHeatmap; + + if (options.yaxis && isHeatmap) { + options.yaxis.ticks = () => new Array(data.length + 1).fill(0).map((_el, i) => i); + options.yaxis.tickFormatter = (val) => `${val ? 
data[val - 1].labels.le : ''}`; + options.yaxis.min = 0; + options.yaxis.max = data.length; + options.series.lines = { show: false }; + } + this.$chart = $.plot($(this.chartRef.current), data, options); }; destroyPlot = (): void => { @@ -165,7 +178,10 @@ class Graph extends PureComponent { const { chartData } = this.state; this.plot( this.selectedSeriesIndexes.length === 1 && this.selectedSeriesIndexes.includes(selectedIndex) - ? [...chartData.series.map(toHoverColor(selectedIndex, this.props.stacked)), ...chartData.exemplars] + ? [ + ...chartData.series.map(toHoverColor(selectedIndex, this.props.displayMode === GraphDisplayMode.Stacked)), + ...chartData.exemplars, + ] : [ ...chartData.series.filter((_, i) => selected.includes(i)), ...chartData.exemplars.filter((exemplar) => { @@ -190,7 +206,7 @@ class Graph extends PureComponent { } this.rafID = requestAnimationFrame(() => { this.plotSetAndDraw([ - ...this.state.chartData.series.map(toHoverColor(index, this.props.stacked)), + ...this.state.chartData.series.map(toHoverColor(index, this.props.displayMode === GraphDisplayMode.Stacked)), ...this.state.chartData.exemplars, ]); }); @@ -251,13 +267,15 @@ class Graph extends PureComponent { ) : null} - + {this.props.displayMode !== GraphDisplayMode.Heatmap && ( + + )} {/* This is to make sure the graph box expands when the selected exemplar info pops up. */}
diff --git a/web/ui/react-app/src/pages/graph/GraphControls.test.tsx b/web/ui/react-app/src/pages/graph/GraphControls.test.tsx index 3d1961714..b5d843ebe 100755 --- a/web/ui/react-app/src/pages/graph/GraphControls.test.tsx +++ b/web/ui/react-app/src/pages/graph/GraphControls.test.tsx @@ -5,13 +5,15 @@ import { Button, ButtonGroup, Form, InputGroup, InputGroupAddon, Input } from 'r import { FontAwesomeIcon } from '@fortawesome/react-fontawesome'; import { faPlus, faMinus, faChartArea, faChartLine } from '@fortawesome/free-solid-svg-icons'; import TimeInput from './TimeInput'; +import { GraphDisplayMode } from './Panel'; const defaultGraphControlProps = { range: 60 * 60 * 24 * 1000, endTime: 1572100217898, useLocalTime: false, resolution: 10, - stacked: false, + displayMode: GraphDisplayMode.Lines, + isHeatmapData: false, showExemplars: false, onChangeRange: (): void => { @@ -29,6 +31,9 @@ const defaultGraphControlProps = { onChangeShowExemplars: (): void => { // Do nothing. }, + onChangeDisplayMode: (): void => { + // Do nothing. 
+ }, }; describe('GraphControls', () => { @@ -163,10 +168,10 @@ describe('GraphControls', () => { }, ].forEach((testCase) => { const results: boolean[] = []; - const onChange = (stacked: boolean): void => { - results.push(stacked); + const onChange = (mode: GraphDisplayMode): void => { + results.push(mode === GraphDisplayMode.Stacked); }; - const controls = shallow(); + const controls = shallow(); const group = controls.find(ButtonGroup); const btn = group.find(Button).filterWhere((btn) => btn.prop('title') === testCase.title); const onClick = btn.prop('onClick'); diff --git a/web/ui/react-app/src/pages/graph/GraphControls.tsx b/web/ui/react-app/src/pages/graph/GraphControls.tsx index 969400bfd..f71b46af5 100644 --- a/web/ui/react-app/src/pages/graph/GraphControls.tsx +++ b/web/ui/react-app/src/pages/graph/GraphControls.tsx @@ -2,23 +2,24 @@ import React, { Component } from 'react'; import { Button, ButtonGroup, Form, Input, InputGroup, InputGroupAddon } from 'reactstrap'; import { FontAwesomeIcon } from '@fortawesome/react-fontawesome'; -import { faChartArea, faChartLine, faMinus, faPlus } from '@fortawesome/free-solid-svg-icons'; +import { faChartArea, faChartLine, faMinus, faPlus, faBarChart } from '@fortawesome/free-solid-svg-icons'; import TimeInput from './TimeInput'; import { formatDuration, parseDuration } from '../../utils'; +import { GraphDisplayMode } from './Panel'; interface GraphControlsProps { range: number; endTime: number | null; useLocalTime: boolean; resolution: number | null; - stacked: boolean; + displayMode: GraphDisplayMode; + isHeatmapData: boolean; showExemplars: boolean; - onChangeRange: (range: number) => void; onChangeEndTime: (endTime: number | null) => void; onChangeResolution: (resolution: number | null) => void; - onChangeStacking: (stacked: boolean) => void; onChangeShowExemplars: (show: boolean) => void; + onChangeDisplayMode: (mode: GraphDisplayMode) => void; } class GraphControls extends Component { @@ -153,14 +154,29 @@ class 
GraphControls extends Component { - + {/* TODO: Consider replacing this button with a select dropdown in the future, + to allow users to choose from multiple histogram series if available. */} + {this.props.isHeatmapData && ( + + )} diff --git a/web/ui/react-app/src/pages/graph/GraphHeatmapHelpers.ts b/web/ui/react-app/src/pages/graph/GraphHeatmapHelpers.ts new file mode 100644 index 000000000..d564959cb --- /dev/null +++ b/web/ui/react-app/src/pages/graph/GraphHeatmapHelpers.ts @@ -0,0 +1,56 @@ +import { GraphProps, GraphSeries } from './Graph'; + +export function isHeatmapData(data: GraphProps['data']) { + if (!data?.result?.length || data?.result?.length < 2) { + return false; + } + const result = data.result; + const firstLabels = Object.keys(result[0].metric).filter((label) => label !== 'le'); + return result.every(({ metric }) => { + const labels = Object.keys(metric).filter((label) => label !== 'le'); + const allLabelsMatch = labels.every((label) => metric[label] === result[0].metric[label]); + return metric.le && labels.length === firstLabels.length && allLabelsMatch; + }); +} + +export function prepareHeatmapData(buckets: GraphSeries[]) { + if (!buckets.every((a) => a.labels.le)) { + return buckets; + } + + const sortedBuckets = buckets.sort((a, b) => promValueToNumber(a.labels.le) - promValueToNumber(b.labels.le)); + const result: GraphSeries[] = []; + + for (let i = 0; i < sortedBuckets.length; i++) { + const values = []; + const { data, labels, color } = sortedBuckets[i]; + + for (const [timestamp, value] of data) { + const prevVal = sortedBuckets[i - 1]?.data.find((v) => v[0] === timestamp)?.[1] || 0; + const newVal = Number(value) - prevVal; + values.push([Number(timestamp), newVal]); + } + + result.push({ + data: values, + labels, + color, + index: i, + }); + } + return result; +} + +export function promValueToNumber(s: string) { + switch (s) { + case 'NaN': + return NaN; + case 'Inf': + case '+Inf': + return Infinity; + case '-Inf': + return 
-Infinity; + default: + return parseFloat(s); + } +} diff --git a/web/ui/react-app/src/pages/graph/GraphHelpers.test.ts b/web/ui/react-app/src/pages/graph/GraphHelpers.test.ts index 5fb0675a4..206cc59a1 100644 --- a/web/ui/react-app/src/pages/graph/GraphHelpers.test.ts +++ b/web/ui/react-app/src/pages/graph/GraphHelpers.test.ts @@ -212,6 +212,7 @@ describe('GraphHelpers', () => { }, series: { stack: false, + heatmap: false, lines: { lineWidth: 1, steps: false, fill: true }, shadowSize: 0, }, diff --git a/web/ui/react-app/src/pages/graph/GraphHelpers.ts b/web/ui/react-app/src/pages/graph/GraphHelpers.ts index f77383f56..21bb768f5 100644 --- a/web/ui/react-app/src/pages/graph/GraphHelpers.ts +++ b/web/ui/react-app/src/pages/graph/GraphHelpers.ts @@ -1,9 +1,11 @@ import $ from 'jquery'; import { escapeHTML } from '../../utils'; -import { GraphProps, GraphData, GraphSeries, GraphExemplar } from './Graph'; +import { GraphData, GraphExemplar, GraphProps, GraphSeries } from './Graph'; import moment from 'moment-timezone'; import { colorPool } from './ColorPool'; +import { prepareHeatmapData } from './GraphHeatmapHelpers'; +import { GraphDisplayMode } from './Panel'; export const formatValue = (y: number | null): string => { if (y === null) { @@ -145,6 +147,7 @@ export const getOptions = (stacked: boolean, useLocalTime: boolean): jquery.flot }, series: { stack: false, // Stacking is set on a per-series basis because exemplar symbols don't support it. + heatmap: false, lines: { lineWidth: stacked ? 
1 : 2, steps: false, @@ -158,7 +161,7 @@ export const getOptions = (stacked: boolean, useLocalTime: boolean): jquery.flot }; }; -export const normalizeData = ({ queryParams, data, exemplars, stacked }: GraphProps): GraphData => { +export const normalizeData = ({ queryParams, data, exemplars, displayMode }: GraphProps): GraphData => { // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const { startTime, endTime, resolution } = queryParams!; @@ -188,36 +191,37 @@ export const normalizeData = ({ queryParams, data, exemplars, stacked }: GraphPr } const deviation = stdDeviation(sum, values); - return { - series: data.result.map(({ values, histograms, metric }, index) => { - // Insert nulls for all missing steps. - const data = []; - let valuePos = 0; - let histogramPos = 0; + const series = data.result.map(({ values, histograms, metric }, index) => { + // Insert nulls for all missing steps. + const data = []; + let valuePos = 0; + let histogramPos = 0; - for (let t = startTime; t <= endTime; t += resolution) { - // Allow for floating point inaccuracy. - const currentValue = values && values[valuePos]; - const currentHistogram = histograms && histograms[histogramPos]; - if (currentValue && values.length > valuePos && currentValue[0] < t + resolution / 100) { - data.push([currentValue[0] * 1000, parseValue(currentValue[1])]); - valuePos++; - } else if (currentHistogram && histograms.length > histogramPos && currentHistogram[0] < t + resolution / 100) { - data.push([currentHistogram[0] * 1000, parseValue(currentHistogram[1].sum)]); - histogramPos++; - } else { - data.push([t * 1000, null]); - } + for (let t = startTime; t <= endTime; t += resolution) { + // Allow for floating point inaccuracy. 
+ const currentValue = values && values[valuePos]; + const currentHistogram = histograms && histograms[histogramPos]; + if (currentValue && values.length > valuePos && currentValue[0] < t + resolution / 100) { + data.push([currentValue[0] * 1000, parseValue(currentValue[1])]); + valuePos++; + } else if (currentHistogram && histograms.length > histogramPos && currentHistogram[0] < t + resolution / 100) { + data.push([currentHistogram[0] * 1000, parseValue(currentHistogram[1].sum)]); + histogramPos++; + } else { + data.push([t * 1000, null]); } + } + return { + labels: metric !== null ? metric : {}, + color: colorPool[index % colorPool.length], + stack: displayMode === GraphDisplayMode.Stacked, + data, + index, + }; + }); - return { - labels: metric !== null ? metric : {}, - color: colorPool[index % colorPool.length], - stack: stacked, - data, - index, - }; - }), + return { + series: displayMode === GraphDisplayMode.Heatmap ? prepareHeatmapData(series) : series, exemplars: Object.values(buckets).flatMap((bucket) => { if (bucket.length === 1) { return bucket[0]; diff --git a/web/ui/react-app/src/pages/graph/GraphTabContent.tsx b/web/ui/react-app/src/pages/graph/GraphTabContent.tsx index e31c7a280..a5e752994 100644 --- a/web/ui/react-app/src/pages/graph/GraphTabContent.tsx +++ b/web/ui/react-app/src/pages/graph/GraphTabContent.tsx @@ -3,12 +3,13 @@ import { Alert } from 'reactstrap'; import Graph from './Graph'; import { QueryParams, ExemplarData } from '../../types/types'; import { isPresent } from '../../utils'; +import { GraphDisplayMode } from './Panel'; interface GraphTabContentProps { // eslint-disable-next-line @typescript-eslint/no-explicit-any data: any; exemplars: ExemplarData; - stacked: boolean; + displayMode: GraphDisplayMode; useLocalTime: boolean; showExemplars: boolean; handleTimeRangeSelection: (startTime: number, endTime: number) => void; @@ -19,7 +20,7 @@ interface GraphTabContentProps { export const GraphTabContent: FC = ({ data, exemplars, - 
stacked, + displayMode, useLocalTime, lastQueryParams, showExemplars, @@ -41,7 +42,7 @@ export const GraphTabContent: FC = ({ { @@ -84,7 +84,7 @@ describe('Panel', () => { range: 10, endTime: 1572100217898, resolution: 28, - stacked: false, + displayMode: GraphDisplayMode.Lines, showExemplars: true, }; const graphPanel = mount(); @@ -94,8 +94,8 @@ describe('Panel', () => { expect(controls.prop('endTime')).toEqual(options.endTime); expect(controls.prop('range')).toEqual(options.range); expect(controls.prop('resolution')).toEqual(options.resolution); - expect(controls.prop('stacked')).toEqual(options.stacked); - expect(graph.prop('stacked')).toEqual(options.stacked); + expect(controls.prop('displayMode')).toEqual(options.displayMode); + expect(graph.prop('displayMode')).toEqual(options.displayMode); }); describe('when switching between modes', () => { diff --git a/web/ui/react-app/src/pages/graph/Panel.tsx b/web/ui/react-app/src/pages/graph/Panel.tsx index 244a7606e..501ab67c9 100644 --- a/web/ui/react-app/src/pages/graph/Panel.tsx +++ b/web/ui/react-app/src/pages/graph/Panel.tsx @@ -13,6 +13,7 @@ import QueryStatsView, { QueryStats } from './QueryStatsView'; import { QueryParams, ExemplarData } from '../../types/types'; import { API_PATH } from '../../constants/constants'; import { debounce } from '../../utils'; +import { isHeatmapData } from './GraphHeatmapHelpers'; interface PanelProps { options: PanelOptions; @@ -39,6 +40,7 @@ interface PanelState { error: string | null; stats: QueryStats | null; exprInputValue: string; + isHeatmapData: boolean; } export interface PanelOptions { @@ -47,7 +49,7 @@ export interface PanelOptions { range: number; // Range in milliseconds. endTime: number | null; // Timestamp in milliseconds. resolution: number | null; // Resolution in seconds. 
- stacked: boolean; + displayMode: GraphDisplayMode; showExemplars: boolean; } @@ -56,13 +58,19 @@ export enum PanelType { Table = 'table', } +export enum GraphDisplayMode { + Lines = 'lines', + Stacked = 'stacked', + Heatmap = 'heatmap', +} + export const PanelDefaultOptions: PanelOptions = { type: PanelType.Table, expr: '', range: 60 * 60 * 1000, endTime: null, resolution: null, - stacked: false, + displayMode: GraphDisplayMode.Lines, showExemplars: false, }; @@ -82,6 +90,7 @@ class Panel extends Component { error: null, stats: null, exprInputValue: props.options.expr, + isHeatmapData: false, }; this.debounceExecuteQuery = debounce(this.executeQuery.bind(this), 250); @@ -184,6 +193,11 @@ class Panel extends Component { } } + const isHeatmap = isHeatmapData(query.data); + if (!isHeatmap) { + this.setOptions({ displayMode: GraphDisplayMode.Lines }); + } + this.setState({ error: null, data: query.data, @@ -200,6 +214,7 @@ class Panel extends Component { resultSeries, }, loading: false, + isHeatmapData: isHeatmap, }); this.abortInFlightFetch = null; } catch (err: unknown) { @@ -252,8 +267,8 @@ class Panel extends Component { this.setOptions({ type: type }); }; - handleChangeStacking = (stacked: boolean): void => { - this.setOptions({ stacked: stacked }); + handleChangeDisplayMode = (mode: GraphDisplayMode): void => { + this.setOptions({ displayMode: mode }); }; handleChangeShowExemplars = (show: boolean): void => { @@ -337,18 +352,19 @@ class Panel extends Component { endTime={options.endTime} useLocalTime={this.props.useLocalTime} resolution={options.resolution} - stacked={options.stacked} + displayMode={options.displayMode} + isHeatmapData={this.state.isHeatmapData} showExemplars={options.showExemplars} onChangeRange={this.handleChangeRange} onChangeEndTime={this.handleChangeEndTime} onChangeResolution={this.handleChangeResolution} - onChangeStacking={this.handleChangeStacking} + onChangeDisplayMode={this.handleChangeDisplayMode} 
onChangeShowExemplars={this.handleChangeShowExemplars} /> { @@ -196,8 +196,12 @@ export const parseOption = (param: string): Partial => { case 'tab': return { type: decodedValue === '0' ? PanelType.Graph : PanelType.Table }; + case 'display_mode': + const validKey = Object.values(GraphDisplayMode).includes(decodedValue as GraphDisplayMode); + return { displayMode: validKey ? (decodedValue as GraphDisplayMode) : GraphDisplayMode.Lines }; + case 'stacked': - return { stacked: decodedValue === '1' }; + return { displayMode: decodedValue === '1' ? GraphDisplayMode.Stacked : GraphDisplayMode.Lines }; case 'show_exemplars': return { showExemplars: decodedValue === '1' }; @@ -225,12 +229,12 @@ export const formatParam = export const toQueryString = ({ key, options }: PanelMeta): string => { const formatWithKey = formatParam(key); - const { expr, type, stacked, range, endTime, resolution, showExemplars } = options; + const { expr, type, displayMode, range, endTime, resolution, showExemplars } = options; const time = isPresent(endTime) ? formatTime(endTime) : false; const urlParams = [ formatWithKey('expr', expr), formatWithKey('tab', type === PanelType.Graph ? 0 : 1), - formatWithKey('stacked', stacked ? 1 : 0), + formatWithKey('display_mode', displayMode), formatWithKey('show_exemplars', showExemplars ? 1 : 0), formatWithKey('range_input', formatDuration(range)), time ? 
`${formatWithKey('end_input', time)}&${formatWithKey('moment_input', time)}` : '', @@ -264,7 +268,9 @@ export const getQueryParam = (key: string): string => { }; export const createExpressionLink = (expr: string): string => { - return `../graph?g0.expr=${encodeURIComponent(expr)}&g0.tab=1&g0.stacked=0&g0.show_exemplars=0.g0.range_input=1h.`; + return `../graph?g0.expr=${encodeURIComponent(expr)}&g0.tab=1&g0.display_mode=${ + GraphDisplayMode.Lines + }&g0.show_exemplars=0.g0.range_input=1h.`; }; // eslint-disable-next-line @typescript-eslint/no-explicit-any, diff --git a/web/ui/react-app/src/utils/utils.test.ts b/web/ui/react-app/src/utils/utils.test.ts index 4db3e28a6..4b1446581 100644 --- a/web/ui/react-app/src/utils/utils.test.ts +++ b/web/ui/react-app/src/utils/utils.test.ts @@ -16,7 +16,7 @@ import { decodePanelOptionsFromQueryString, parsePrometheusFloat, } from '.'; -import { PanelType } from '../pages/graph/Panel'; +import { GraphDisplayMode, PanelType } from '../pages/graph/Panel'; describe('Utils', () => { describe('escapeHTML', (): void => { @@ -210,7 +210,7 @@ describe('Utils', () => { expr: 'rate(node_cpu_seconds_total{mode="system"}[1m])', range: 60 * 60 * 1000, resolution: null, - stacked: false, + displayMode: GraphDisplayMode.Lines, type: PanelType.Graph, }, }, @@ -221,13 +221,12 @@ describe('Utils', () => { expr: 'node_filesystem_avail_bytes', range: 60 * 60 * 1000, resolution: null, - stacked: false, + displayMode: GraphDisplayMode.Lines, type: PanelType.Table, }, }, ]; - const query = - '?g0.expr=rate(node_cpu_seconds_total%7Bmode%3D%22system%22%7D%5B1m%5D)&g0.tab=0&g0.stacked=0&g0.show_exemplars=0&g0.range_input=1h&g0.end_input=2019-10-25%2023%3A37%3A00&g0.moment_input=2019-10-25%2023%3A37%3A00&g1.expr=node_filesystem_avail_bytes&g1.tab=1&g1.stacked=0&g1.show_exemplars=0&g1.range_input=1h'; + const query = 
`?g0.expr=rate(node_cpu_seconds_total%7Bmode%3D%22system%22%7D%5B1m%5D)&g0.tab=0&g0.display_mode=${GraphDisplayMode.Lines}&g0.show_exemplars=0&g0.range_input=1h&g0.end_input=2019-10-25%2023%3A37%3A00&g0.moment_input=2019-10-25%2023%3A37%3A00&g1.expr=node_filesystem_avail_bytes&g1.tab=1&g1.display_mode=${GraphDisplayMode.Lines}&g1.show_exemplars=0&g1.range_input=1h`; describe('decodePanelOptionsFromQueryString', () => { it('returns [] when query is empty', () => { @@ -246,7 +245,7 @@ describe('Utils', () => { expect(parseOption('expr=foo')).toEqual({ expr: 'foo' }); }); it('should parse stacked', () => { - expect(parseOption('stacked=1')).toEqual({ stacked: true }); + expect(parseOption('stacked=1')).toEqual({ displayMode: GraphDisplayMode.Stacked }); }); it('should parse end_input', () => { expect(parseOption('end_input=2019-10-25%2023%3A37')).toEqual({ endTime: moment.utc('2019-10-25 23:37').valueOf() }); @@ -294,14 +293,16 @@ describe('Utils', () => { options: { expr: 'foo', type: PanelType.Graph, - stacked: true, + displayMode: GraphDisplayMode.Stacked, showExemplars: true, range: 0, endTime: null, resolution: 1, }, }) - ).toEqual('g0.expr=foo&g0.tab=0&g0.stacked=1&g0.show_exemplars=1&g0.range_input=0s&g0.step_input=1'); + ).toEqual( + `g0.expr=foo&g0.tab=0&g0.display_mode=${GraphDisplayMode.Stacked}&g0.show_exemplars=1&g0.range_input=0s&g0.step_input=1` + ); }); }); diff --git a/web/ui/react-app/src/vendor/flot/jquery.flot.heatmap.js b/web/ui/react-app/src/vendor/flot/jquery.flot.heatmap.js new file mode 100644 index 000000000..29d5c81ef --- /dev/null +++ b/web/ui/react-app/src/vendor/flot/jquery.flot.heatmap.js @@ -0,0 +1,195 @@ +/* Flot plugin for rendering heatmap charts. + +Inspired by a similar feature in VictoriaMetrics. +See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3384 for more details. 
+ */ + +import moment from 'moment-timezone'; +import {formatValue} from "../../pages/graph/GraphHelpers"; + +const TOOLTIP_ID = 'heatmap-tooltip'; +const GRADIENT_STEPS = 16; + +(function ($) { + let mouseMoveHandler = null; + + function init(plot) { + plot.hooks.draw.push((plot, ctx) => { + const options = plot.getOptions(); + if (!options.series.heatmap) { + return; + } + + const series = plot.getData(); + const fillPalette = generateGradient("#FDF4EB", "#752E12", GRADIENT_STEPS); + const fills = countsToFills(series.flatMap(s => s.data.map(d => d[1])), fillPalette); + series.forEach((s, i) => drawHeatmap(s, plot, ctx, i, fills)); + }); + + plot.hooks.bindEvents.push((plot, eventHolder) => { + const options = plot.getOptions(); + if (!options.series.heatmap || !options.tooltip.show) { + return; + } + + mouseMoveHandler = (e) => { + removeTooltip(); + const {left: xOffset, top: yOffset} = plot.offset(); + const pos = plot.c2p({left: e.pageX - xOffset, top: e.pageY - yOffset}); + const seriesIdx = Math.floor(pos.y); + const series = plot.getData(); + + for (let i = 0; i < series.length; i++) { + if (seriesIdx !== i) { + continue; + } + + const s = series[i]; + const label = s?.labels?.le || "" + const prevLabel = series[i - 1]?.labels?.le || "" + for (let j = 0; j < s.data.length - 1; j++) { + const [xStartVal, yStartVal] = s.data[j]; + const [xEndVal] = s.data[j + 1]; + const isIncluded = pos.x >= xStartVal && pos.x <= xEndVal; + if (yStartVal && isIncluded) { + showTooltip({ + cssClass: options.tooltip.cssClass, + x: e.pageX, + y: e.pageY, + value: formatValue(yStartVal), + dateTime: [xStartVal, xEndVal].map(t => moment(t).format('YYYY-MM-DD HH:mm:ss Z')), + label: `${prevLabel} - ${label}`, + }); + + break; + } + } + } + } + + $(eventHolder).on('mousemove', mouseMoveHandler); + }); + + plot.hooks.shutdown.push((_plot, eventHolder) => { + removeTooltip(); + $(eventHolder).off("mousemove", mouseMoveHandler); + }); + } + + function showTooltip({x, y, cssClass, 
value, dateTime, label}) { + const tooltip = document.createElement('div'); + tooltip.id = TOOLTIP_ID + tooltip.className = cssClass; + + const timeHtml = `
${dateTime.join('
')}
` + const labelHtml = `
Bucket: ${label || 'value'}
` + const valueHtml = `
Value: ${value}
` + tooltip.innerHTML = `
${timeHtml}
${labelHtml}${valueHtml}
`; + + tooltip.style.position = 'absolute'; + tooltip.style.top = y + 5 + 'px'; + tooltip.style.left = x + 5 + 'px'; + tooltip.style.display = 'none'; + document.body.appendChild(tooltip); + + const totalTipWidth = $(tooltip).outerWidth(); + const totalTipHeight = $(tooltip).outerHeight(); + + if (x > ($(window).width() - totalTipWidth)) { + x -= totalTipWidth; + tooltip.style.left = x + 'px'; + } + + if (y > ($(window).height() - totalTipHeight)) { + y -= totalTipHeight; + tooltip.style.top = y + 'px'; + } + + tooltip.style.display = 'block'; // This will trigger a re-render, allowing fadeIn to work + tooltip.style.opacity = '1'; + } + + function removeTooltip() { + let tooltip = document.getElementById(TOOLTIP_ID); + if (tooltip) { + document.body.removeChild(tooltip); + } + } + + function drawHeatmap(series, plot, ctx, seriesIndex, fills) { + const {data: dataPoints} = series; + const {left: xOffset, top: yOffset} = plot.getPlotOffset(); + const plotHeight = plot.height(); + const xaxis = plot.getXAxes()[0]; + const cellHeight = plotHeight / plot.getData().length; + + ctx.save(); + ctx.translate(xOffset, yOffset); + + for (let i = 0, len = dataPoints.length - 1; i < len; i++) { + const [xStartVal, countStart] = dataPoints[i]; + const [xEndVal] = dataPoints[i + 1]; + + const xStart = xaxis.p2c(xStartVal); + const xEnd = xaxis.p2c(xEndVal); + const cellWidth = xEnd - xStart; + const yStart = plotHeight - (seriesIndex + 1) * cellHeight; + + ctx.fillStyle = fills[countStart]; + ctx.fillRect(xStart + 0.5, yStart + 0.5, cellWidth - 1, cellHeight - 1); + } + + ctx.restore(); + } + + function countsToFills(counts, fillPalette) { + const hideThreshold = 0; + const minCount = Math.min(...counts.filter(count => count > hideThreshold)); + const maxCount = Math.max(...counts); + const range = maxCount - minCount; + const paletteSize = fillPalette.length; + + return counts.reduce((acc, count) => { + const index = count === 0 + ? 
-1 + : Math.min(paletteSize - 1, Math.floor((paletteSize * (count - minCount)) / range)); + acc[count] = fillPalette[index] || "transparent"; + return acc; + }, {}); + } + + function generateGradient(color1, color2, steps) { + function interpolateColor(startColor, endColor, step) { + let r = startColor[0] + step * (endColor[0] - startColor[0]); + let g = startColor[1] + step * (endColor[1] - startColor[1]); + let b = startColor[2] + step * (endColor[2] - startColor[2]); + + return `rgb(${Math.round(r)}, ${Math.round(g)}, ${Math.round(b)})`; + } + + function hexToRgb(hex) { + const bigint = parseInt(hex.slice(1), 16); + const r = (bigint >> 16) & 255; + const g = (bigint >> 8) & 255; + const b = bigint & 255; + + return [r, g, b]; + } + + return new Array(steps).fill("").map((_el, i) => { + return interpolateColor(hexToRgb(color1), hexToRgb(color2), i / (steps - 1)); + }); + } + + + jQuery.plot.plugins.push({ + init, + options: { + series: { + heatmap: false + } + }, + name: 'heatmap', + version: '1.0' + }); +})(jQuery); From ccfe14d7e71fc1dcf2be0efc1a5550ec9336907e Mon Sep 17 00:00:00 2001 From: zenador Date: Sat, 25 Nov 2023 07:05:38 +0800 Subject: [PATCH 608/632] PromQL: ignore small errors for bucketQuantile (#13153) promql: Improve histogram_quantile calculation for classic buckets Tiny differences between classic buckets are most likely caused by floating point precision issues. With this commit, relative changes below a certain threshold are ignored. This makes the result of histogram_quantile more meaningful, and also avoids triggering the _input to histogram_quantile needed to be fixed for monotonicity_ annotations in unactionable cases. This commit also adds explanation of the new adjustment and of the monotonicity annotation to the documentation of `histogram_quantile`. 
--------- Signed-off-by: Jeanette Tan --- docs/querying/functions.md | 18 ++ promql/engine_test.go | 2 +- promql/functions.go | 2 +- promql/quantile.go | 93 +++++++--- promql/quantile_test.go | 318 ++++++++++++++++++++++++++++++++ promql/test.go | 17 +- util/annotations/annotations.go | 2 +- 7 files changed, 416 insertions(+), 36 deletions(-) create mode 100644 promql/quantile_test.go diff --git a/docs/querying/functions.md b/docs/querying/functions.md index 6838bb72b..00afa1d22 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -323,6 +323,24 @@ a histogram. You can use `histogram_quantile(1, v instant-vector)` to get the estimated maximum value stored in a histogram. +Buckets of classic histograms are cumulative. Therefore, the following should always be the case: + +- The counts in the buckets are monotonically increasing (strictly non-decreasing). +- A lack of observations between the upper limits of two consecutive buckets results in equal counts +in those two buckets. + +However, floating point precision issues (e.g. small discrepancies introduced by computing of buckets +with `sum(rate(...))`) or invalid data might violate these assumptions. In that case, +`histogram_quantile` would be unable to return meaningful results. To mitigate the issue, +`histogram_quantile` assumes that tiny relative differences between consecutive buckets are happening +because of floating point precision errors and ignores them. (The threshold to ignore a difference +between two buckets is a trillionth (1e-12) of the sum of both buckets.) Furthermore, if there are +non-monotonic bucket counts even after this adjustment, they are increased to the value of the +previous buckets to enforce monotonicity. The latter is evidence for an actual issue with the input +data and is therefore flagged with an informational annotation reading `input to histogram_quantile +needed to be fixed for monotonicity`. 
If you encounter this annotation, you should find and remove +the source of the invalid data. + ## `histogram_stddev()` and `histogram_stdvar()` _Both functions only act on native histograms, which are an experimental diff --git a/promql/engine_test.go b/promql/engine_test.go index 7532a3294..d6a10455a 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -3699,7 +3699,7 @@ func TestNativeHistogram_HistogramQuantile(t *testing.T) { require.Len(t, vector, 1) require.Nil(t, vector[0].H) - require.True(t, almostEqual(sc.value, vector[0].F)) + require.True(t, almostEqual(sc.value, vector[0].F, defaultEpsilon)) }) } idx++ diff --git a/promql/functions.go b/promql/functions.go index e9a03406a..07e439cf2 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -1163,7 +1163,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev for _, mb := range enh.signatureToMetricWithBuckets { if len(mb.buckets) > 0 { - res, forcedMonotonicity := bucketQuantile(q, mb.buckets) + res, forcedMonotonicity, _ := bucketQuantile(q, mb.buckets) enh.Out = append(enh.Out, Sample{ Metric: mb.metric, F: res, diff --git a/promql/quantile.go b/promql/quantile.go index f28944868..f62519f5b 100644 --- a/promql/quantile.go +++ b/promql/quantile.go @@ -23,6 +23,25 @@ import ( "github.com/prometheus/prometheus/model/labels" ) +// smallDeltaTolerance is the threshold for relative deltas between classic +// histogram buckets that will be ignored by the histogram_quantile function +// because they are most likely artifacts of floating point precision issues. 
+// Testing on 2 sets of real data with bugs arising from small deltas, +// the safe ranges were from: +// - 1e-05 to 1e-15 +// - 1e-06 to 1e-15 +// Anything to the left of that would cause non-query-sharded data to have +// small deltas ignored (unnecessary and we should avoid this), and anything +// to the right of that would cause query-sharded data to not have its small +// deltas ignored (so the problem won't be fixed). +// For context, query sharding triggers these float precision errors in Mimir. +// To illustrate, with a relative deviation of 1e-12, we need to have 1e12 +// observations in the bucket so that the change of one observation is small +// enough to get ignored. With the usual observation rate even of very busy +// services, this will hardly be reached in timeframes that matters for +// monitoring. +const smallDeltaTolerance = 1e-12 + // Helpers to calculate quantiles. // excludedLabels are the labels to exclude from signature calculation for @@ -72,16 +91,19 @@ type metricWithBuckets struct { // // If q>1, +Inf is returned. // -// We also return a bool to indicate if monotonicity needed to be forced. -func bucketQuantile(q float64, buckets buckets) (float64, bool) { +// We also return a bool to indicate if monotonicity needed to be forced, +// and another bool to indicate if small differences between buckets (that +// are likely artifacts of floating point precision issues) have been +// ignored. +func bucketQuantile(q float64, buckets buckets) (float64, bool, bool) { if math.IsNaN(q) { - return math.NaN(), false + return math.NaN(), false, false } if q < 0 { - return math.Inf(-1), false + return math.Inf(-1), false, false } if q > 1 { - return math.Inf(+1), false + return math.Inf(+1), false, false } slices.SortFunc(buckets, func(a, b bucket) int { // We don't expect the bucket boundary to be a NaN. 
@@ -94,27 +116,27 @@ func bucketQuantile(q float64, buckets buckets) (float64, bool) { return 0 }) if !math.IsInf(buckets[len(buckets)-1].upperBound, +1) { - return math.NaN(), false + return math.NaN(), false, false } buckets = coalesceBuckets(buckets) - forcedMonotonic := ensureMonotonic(buckets) + forcedMonotonic, fixedPrecision := ensureMonotonicAndIgnoreSmallDeltas(buckets, smallDeltaTolerance) if len(buckets) < 2 { - return math.NaN(), false + return math.NaN(), false, false } observations := buckets[len(buckets)-1].count if observations == 0 { - return math.NaN(), false + return math.NaN(), false, false } rank := q * observations b := sort.Search(len(buckets)-1, func(i int) bool { return buckets[i].count >= rank }) if b == len(buckets)-1 { - return buckets[len(buckets)-2].upperBound, forcedMonotonic + return buckets[len(buckets)-2].upperBound, forcedMonotonic, fixedPrecision } if b == 0 && buckets[0].upperBound <= 0 { - return buckets[0].upperBound, forcedMonotonic + return buckets[0].upperBound, forcedMonotonic, fixedPrecision } var ( bucketStart float64 @@ -126,7 +148,7 @@ func bucketQuantile(q float64, buckets buckets) (float64, bool) { count -= buckets[b-1].count rank -= buckets[b-1].count } - return bucketStart + (bucketEnd-bucketStart)*(rank/count), forcedMonotonic + return bucketStart + (bucketEnd-bucketStart)*(rank/count), forcedMonotonic, fixedPrecision } // histogramQuantile calculates the quantile 'q' based on the given histogram. @@ -348,6 +370,7 @@ func coalesceBuckets(buckets buckets) buckets { // - Ingestion via the remote write receiver that Prometheus implements. // - Optimisation of query execution where precision is sacrificed for other // benefits, not by Prometheus but by systems built on top of it. +// - Circumstances where floating point precision errors accumulate. 
// // Monotonicity is usually guaranteed because if a bucket with upper bound // u1 has count c1, then any bucket with a higher upper bound u > u1 must @@ -357,22 +380,42 @@ func coalesceBuckets(buckets buckets) buckets { // bucket with the φ-quantile count, so breaking the monotonicity // guarantee causes bucketQuantile() to return undefined (nonsense) results. // -// As a somewhat hacky solution, we calculate the "envelope" of the histogram -// buckets, essentially removing any decreases in the count between successive -// buckets. We return a bool to indicate if this monotonicity was forced or not. -func ensureMonotonic(buckets buckets) bool { - forced := false - max := buckets[0].count +// As a somewhat hacky solution, we first silently ignore any numerically +// insignificant (relative delta below the requested tolerance and likely to +// be from floating point precision errors) differences between successive +// buckets regardless of the direction. Then we calculate the "envelope" of +// the histogram buckets, essentially removing any decreases in the count +// between successive buckets. +// +// We return a bool to indicate if this monotonicity was forced or not, and +// another bool to indicate if small deltas were ignored or not. +func ensureMonotonicAndIgnoreSmallDeltas(buckets buckets, tolerance float64) (bool, bool) { + var forcedMonotonic, fixedPrecision bool + prev := buckets[0].count for i := 1; i < len(buckets); i++ { - switch { - case buckets[i].count > max: - max = buckets[i].count - case buckets[i].count < max: - buckets[i].count = max - forced = true + curr := buckets[i].count // Assumed always positive. + if curr == prev { + // No correction needed if the counts are identical between buckets. + continue } + if almostEqual(prev, curr, tolerance) { + // Silently correct numerically insignificant differences from floating + // point precision errors, regardless of direction. + // Do not update the 'prev' value as we are ignoring the difference. 
+ buckets[i].count = prev + fixedPrecision = true + continue + } + if curr < prev { + // Force monotonicity by removing any decreases regardless of magnitude. + // Do not update the 'prev' value as we are ignoring the decrease. + buckets[i].count = prev + forcedMonotonic = true + continue + } + prev = curr } - return forced + return forcedMonotonic, fixedPrecision } // quantile calculates the given quantile of a vector of samples. diff --git a/promql/quantile_test.go b/promql/quantile_test.go new file mode 100644 index 000000000..84f4c0b06 --- /dev/null +++ b/promql/quantile_test.go @@ -0,0 +1,318 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promql + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBucketQuantile_ForcedMonotonicity(t *testing.T) { + eps := 1e-12 + + for name, tc := range map[string]struct { + getInput func() buckets // The buckets can be modified in-place so return a new one each time. 
+ expectedForced bool + expectedFixed bool + expectedValues map[float64]float64 + }{ + "simple - monotonic": { + getInput: func() buckets { + return buckets{ + { + upperBound: 10, + count: 10, + }, { + upperBound: 15, + count: 15, + }, { + upperBound: 20, + count: 15, + }, { + upperBound: 30, + count: 15, + }, { + upperBound: math.Inf(1), + count: 15, + }, + } + }, + expectedForced: false, + expectedFixed: false, + expectedValues: map[float64]float64{ + 1: 15., + 0.99: 14.85, + 0.9: 13.5, + 0.5: 7.5, + }, + }, + "simple - non-monotonic middle": { + getInput: func() buckets { + return buckets{ + { + upperBound: 10, + count: 10, + }, { + upperBound: 15, + count: 15, + }, { + upperBound: 20, + count: 15.00000000001, // Simulate the case there's a small imprecision in float64. + }, { + upperBound: 30, + count: 15, + }, { + upperBound: math.Inf(1), + count: 15, + }, + } + }, + expectedForced: false, + expectedFixed: true, + expectedValues: map[float64]float64{ + 1: 15., + 0.99: 14.85, + 0.9: 13.5, + 0.5: 7.5, + }, + }, + "real example - monotonic": { + getInput: func() buckets { + return buckets{ + { + upperBound: 1, + count: 6454661.3014166197, + }, { + upperBound: 5, + count: 8339611.2001912938, + }, { + upperBound: 10, + count: 14118319.2444762159, + }, { + upperBound: 25, + count: 14130031.5272856522, + }, { + upperBound: 50, + count: 46001270.3030008152, + }, { + upperBound: 64, + count: 46008473.8585563600, + }, { + upperBound: 80, + count: 46008473.8585563600, + }, { + upperBound: 100, + count: 46008473.8585563600, + }, { + upperBound: 250, + count: 46008473.8585563600, + }, { + upperBound: 1000, + count: 46008473.8585563600, + }, { + upperBound: math.Inf(1), + count: 46008473.8585563600, + }, + } + }, + expectedForced: false, + expectedFixed: false, + expectedValues: map[float64]float64{ + 1: 64., + 0.99: 49.64475715376406, + 0.9: 46.39671690938454, + 0.5: 31.96098248992002, + }, + }, + "real example - non-monotonic": { + getInput: func() buckets { + return 
buckets{ + { + upperBound: 1, + count: 6454661.3014166225, + }, { + upperBound: 5, + count: 8339611.2001912957, + }, { + upperBound: 10, + count: 14118319.2444762159, + }, { + upperBound: 25, + count: 14130031.5272856504, + }, { + upperBound: 50, + count: 46001270.3030008227, + }, { + upperBound: 64, + count: 46008473.8585563824, + }, { + upperBound: 80, + count: 46008473.8585563898, + }, { + upperBound: 100, + count: 46008473.8585563824, + }, { + upperBound: 250, + count: 46008473.8585563824, + }, { + upperBound: 1000, + count: 46008473.8585563898, + }, { + upperBound: math.Inf(1), + count: 46008473.8585563824, + }, + } + }, + expectedForced: false, + expectedFixed: true, + expectedValues: map[float64]float64{ + 1: 64., + 0.99: 49.64475715376406, + 0.9: 46.39671690938454, + 0.5: 31.96098248992002, + }, + }, + "real example 2 - monotonic": { + getInput: func() buckets { + return buckets{ + { + upperBound: 0.005, + count: 9.6, + }, { + upperBound: 0.01, + count: 9.688888889, + }, { + upperBound: 0.025, + count: 9.755555556, + }, { + upperBound: 0.05, + count: 9.844444444, + }, { + upperBound: 0.1, + count: 9.888888889, + }, { + upperBound: 0.25, + count: 9.888888889, + }, { + upperBound: 0.5, + count: 9.888888889, + }, { + upperBound: 1, + count: 9.888888889, + }, { + upperBound: 2.5, + count: 9.888888889, + }, { + upperBound: 5, + count: 9.888888889, + }, { + upperBound: 10, + count: 9.888888889, + }, { + upperBound: 25, + count: 9.888888889, + }, { + upperBound: 50, + count: 9.888888889, + }, { + upperBound: 100, + count: 9.888888889, + }, { + upperBound: math.Inf(1), + count: 9.888888889, + }, + } + }, + expectedForced: false, + expectedFixed: false, + expectedValues: map[float64]float64{ + 1: 0.1, + 0.99: 0.03468750000281261, + 0.9: 0.00463541666671875, + 0.5: 0.0025752314815104174, + }, + }, + "real example 2 - non-monotonic": { + getInput: func() buckets { + return buckets{ + { + upperBound: 0.005, + count: 9.6, + }, { + upperBound: 0.01, + count: 9.688888889, 
+ }, { + upperBound: 0.025, + count: 9.755555556, + }, { + upperBound: 0.05, + count: 9.844444444, + }, { + upperBound: 0.1, + count: 9.888888889, + }, { + upperBound: 0.25, + count: 9.888888889, + }, { + upperBound: 0.5, + count: 9.888888889, + }, { + upperBound: 1, + count: 9.888888889, + }, { + upperBound: 2.5, + count: 9.888888889, + }, { + upperBound: 5, + count: 9.888888889, + }, { + upperBound: 10, + count: 9.888888889001, // Simulate the case there's a small imprecision in float64. + }, { + upperBound: 25, + count: 9.888888889, + }, { + upperBound: 50, + count: 9.888888888999, // Simulate the case there's a small imprecision in float64. + }, { + upperBound: 100, + count: 9.888888889, + }, { + upperBound: math.Inf(1), + count: 9.888888889, + }, + } + }, + expectedForced: false, + expectedFixed: true, + expectedValues: map[float64]float64{ + 1: 0.1, + 0.99: 0.03468750000281261, + 0.9: 0.00463541666671875, + 0.5: 0.0025752314815104174, + }, + }, + } { + t.Run(name, func(t *testing.T) { + for q, v := range tc.expectedValues { + res, forced, fixed := bucketQuantile(q, tc.getInput()) + require.Equal(t, tc.expectedForced, forced) + require.Equal(t, tc.expectedFixed, fixed) + require.InEpsilon(t, v, res, eps) + } + }) + } +} diff --git a/promql/test.go b/promql/test.go index f6a31ee43..7274a8f0d 100644 --- a/promql/test.go +++ b/promql/test.go @@ -49,7 +49,7 @@ var ( ) const ( - epsilon = 0.000001 // Relative error allowed for sample values. + defaultEpsilon = 0.000001 // Relative error allowed for sample values. 
) var testStartTime = time.Unix(0, 0).UTC() @@ -440,7 +440,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error { if (expH == nil) != (v.H == nil) || (expH != nil && !expH.Equals(v.H)) { return fmt.Errorf("expected %v for %s but got %s", HistogramTestExpression(expH), v.Metric, HistogramTestExpression(v.H)) } - if !almostEqual(exp0.Value, v.F) { + if !almostEqual(exp0.Value, v.F, defaultEpsilon) { return fmt.Errorf("expected %v for %s but got %v", exp0.Value, v.Metric, v.F) } @@ -464,7 +464,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error { if exp0.Histogram != nil { return fmt.Errorf("expected Histogram %v but got scalar %s", exp0.Histogram.TestExpression(), val.String()) } - if !almostEqual(exp0.Value, val.V) { + if !almostEqual(exp0.Value, val.V, defaultEpsilon) { return fmt.Errorf("expected Scalar %v but got %v", val.V, exp0.Value) } @@ -663,9 +663,9 @@ func (t *test) clear() { t.context, t.cancelCtx = context.WithCancel(context.Background()) } -// samplesAlmostEqual returns true if the two sample lines only differ by a -// small relative error in their sample value. -func almostEqual(a, b float64) bool { +// almostEqual returns true if a and b differ by less than their sum +// multiplied by epsilon. +func almostEqual(a, b, epsilon float64) bool { // NaN has no equality but for testing we still want to know whether both values // are NaN. 
if math.IsNaN(a) && math.IsNaN(b) { @@ -677,12 +677,13 @@ func almostEqual(a, b float64) bool { return true } + absSum := math.Abs(a) + math.Abs(b) diff := math.Abs(a - b) - if a == 0 || b == 0 || diff < minNormal { + if a == 0 || b == 0 || absSum < minNormal { return diff < epsilon*minNormal } - return diff/(math.Abs(a)+math.Abs(b)) < epsilon + return diff/math.Min(absSum, math.MaxFloat64) < epsilon } func parseNumber(s string) (float64, error) { diff --git a/util/annotations/annotations.go b/util/annotations/annotations.go index fa4983fc9..2041d0217 100644 --- a/util/annotations/annotations.go +++ b/util/annotations/annotations.go @@ -108,7 +108,7 @@ var ( MixedClassicNativeHistogramsWarning = fmt.Errorf("%w: vector contains a mix of classic and native histograms for metric name", PromQLWarning) PossibleNonCounterInfo = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count/_bucket:", PromQLInfo) - HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (and may give inaccurate results) for metric name", PromQLInfo) + HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name", PromQLInfo) ) type annoErr struct { From ea1862aab4e69a26806c8b876b9a80797f190982 Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Sat, 25 Nov 2023 11:38:15 +0200 Subject: [PATCH 609/632] Explicit schema check in [Float]Histogram.ReduceResolution Signed-off-by: Linas Medziunas --- model/histogram/float_histogram.go | 4 ++++ model/histogram/histogram.go | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index e0f5d208e..05a20f197 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -1110,6 +1110,10 @@ func 
floatBucketsMatch(b1, b2 []float64) bool { // ReduceResolution reduces the float histogram's spans, buckets into target schema. // The target schema must be smaller than the current float histogram's schema. func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram { + if targetSchema >= h.Schema { + panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema)) + } + h.PositiveSpans, h.PositiveBuckets = reduceResolution(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, false) h.NegativeSpans, h.NegativeBuckets = reduceResolution(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, false) h.Schema = targetSchema diff --git a/model/histogram/histogram.go b/model/histogram/histogram.go index 3ebb27fbc..4a12498f2 100644 --- a/model/histogram/histogram.go +++ b/model/histogram/histogram.go @@ -497,6 +497,10 @@ func (c *cumulativeBucketIterator) At() Bucket[uint64] { // ReduceResolution reduces the histogram's spans, buckets into target schema. // The target schema must be smaller than the current histogram's schema. 
func (h *Histogram) ReduceResolution(targetSchema int32) *Histogram { + if targetSchema >= h.Schema { + panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema)) + } + h.PositiveSpans, h.PositiveBuckets = reduceResolution( h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, true, ) From f99ecc376ee4c052527abf80f4cc0c3e112f4355 Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Sun, 26 Nov 2023 09:26:34 +0200 Subject: [PATCH 610/632] Fix FloatHistogram.Add/Sub mutating its argument Signed-off-by: Linas Medziunas --- model/histogram/float_histogram.go | 38 +++++++++++++++++++------ model/histogram/float_histogram_test.go | 6 ++++ 2 files changed, 36 insertions(+), 8 deletions(-) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index e0f5d208e..18e5112ce 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -268,12 +268,23 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { h.Count += other.Count h.Sum += other.Sum - if other.Schema != h.Schema { - other = other.ReduceResolution(h.Schema) + var ( + otherPositiveSpans = other.PositiveSpans + otherPositiveBuckets = other.PositiveBuckets + otherNegativeSpans = other.NegativeSpans + otherNegativeBuckets = other.NegativeBuckets + ) + + if other.Schema < h.Schema { + panic(fmt.Errorf("cannot add histogram with schema %d to %d", other.Schema, h.Schema)) + } else if other.Schema > h.Schema { + otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, otherPositiveBuckets, other.Schema, h.Schema, false) + otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false) } - h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.PositiveSpans, h.PositiveBuckets, other.PositiveSpans, other.PositiveBuckets) - h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, 
h.ZeroThreshold, false, h.NegativeSpans, h.NegativeBuckets, other.NegativeSpans, other.NegativeBuckets) + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + return h } @@ -284,12 +295,23 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { h.Count -= other.Count h.Sum -= other.Sum - if other.Schema != h.Schema { - other = other.ReduceResolution(h.Schema) + var ( + otherPositiveSpans = other.PositiveSpans + otherPositiveBuckets = other.PositiveBuckets + otherNegativeSpans = other.NegativeSpans + otherNegativeBuckets = other.NegativeBuckets + ) + + if other.Schema < h.Schema { + panic(fmt.Errorf("cannot subtract histogram with schema %d to %d", other.Schema, h.Schema)) + } else if other.Schema > h.Schema { + otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, otherPositiveBuckets, other.Schema, h.Schema, false) + otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false) } - h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.PositiveSpans, h.PositiveBuckets, other.PositiveSpans, other.PositiveBuckets) - h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.NegativeSpans, h.NegativeBuckets, other.NegativeSpans, other.NegativeBuckets) + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + return h } diff --git a/model/histogram/float_histogram_test.go 
b/model/histogram/float_histogram_test.go index bfe3525fa..cbcd2a1f0 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -1573,9 +1573,12 @@ func TestFloatHistogramAdd(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { + in2Copy := c.in2.Copy() require.Equal(t, c.expected, c.in1.Add(c.in2)) // Has it also happened in-place? require.Equal(t, c.expected, c.in1) + // Check that the argument was not mutated. + require.Equal(t, in2Copy, c.in2) }) } } @@ -1659,9 +1662,12 @@ func TestFloatHistogramSub(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { + in2Copy := c.in2.Copy() require.Equal(t, c.expected, c.in1.Sub(c.in2)) // Has it also happened in-place? require.Equal(t, c.expected, c.in1) + // Check that the argument was not mutated. + require.Equal(t, in2Copy, c.in2) }) } } From 19ecc5dd94247793409ffe35d5714ba20eb7f7dd Mon Sep 17 00:00:00 2001 From: Ziqi Zhao Date: Sun, 26 Nov 2023 22:17:37 +0800 Subject: [PATCH 611/632] add test case for bigGap Signed-off-by: Ziqi Zhao --- scrape/target_test.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/scrape/target_test.go b/scrape/target_test.go index 604cfa995..6631f328c 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -507,6 +507,19 @@ func TestBucketLimitAppender(t *testing.T) { NegativeBuckets: []int64{3, 0, 0}, } + bigGap := histogram.Histogram{ + Schema: 0, + Count: 21, + Sum: 33, + ZeroThreshold: 0.001, + ZeroCount: 3, + PositiveSpans: []histogram.Span{ + {Offset: 1, Length: 1}, // in (1, 2] + {Offset: 2, Length: 1}, // in (8, 16] + }, + PositiveBuckets: []int64{1, 0}, // 1, 1 + } + cases := []struct { h histogram.Histogram limit int @@ -533,6 +546,13 @@ func TestBucketLimitAppender(t *testing.T) { expectBucketCount: 6, expectSchema: 0, }, + { + h: bigGap, + limit: 1, + expectError: false, + expectBucketCount: 1, + expectSchema: -2, + }, } resApp := &collectResultAppender{} From 
ecc37588b00c5ec1d4f2349266a23b01f6c91743 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Mon, 27 Nov 2023 16:40:30 +0100 Subject: [PATCH 612/632] tsdb: seriesHashmap.set by making receiver a pointer (#13193) * Fix tsdb.seriesHashmap.set by making receiver a pointer The method tsdb.seriesHashmap.set currently doesn't set the conflicts field properly, due to the receiver being a non-pointer. Fix by turning the receiver into a pointer, and add a corresponding regression test. Signed-off-by: Arve Knudsen --- tsdb/head.go | 4 ++-- tsdb/head_test.go | 51 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 2 deletions(-) diff --git a/tsdb/head.go b/tsdb/head.go index bf181a415..25209cd01 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -1718,7 +1718,7 @@ func (m *seriesHashmap) get(hash uint64, lset labels.Labels) *memSeries { return nil } -func (m seriesHashmap) set(hash uint64, s *memSeries) { +func (m *seriesHashmap) set(hash uint64, s *memSeries) { if existing, found := m.unique[hash]; !found || labels.Equal(existing.lset, s.lset) { m.unique[hash] = s return @@ -1736,7 +1736,7 @@ func (m seriesHashmap) set(hash uint64, s *memSeries) { m.conflicts[hash] = append(l, s) } -func (m seriesHashmap) del(hash uint64, lset labels.Labels) { +func (m *seriesHashmap) del(hash uint64, lset labels.Labels) { var rem []*memSeries unique, found := m.unique[hash] switch { diff --git a/tsdb/head_test.go b/tsdb/head_test.go index f2325039a..64237e76a 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -5542,3 +5542,54 @@ func TestHeadCompactionWhileAppendAndCommitExemplar(t *testing.T) { app.Commit() h.Close() } + +func labelsWithHashCollision() (labels.Labels, labels.Labels) { + // These two series have the same XXHash; thanks to https://github.com/pstibrany/labels_hash_collisions + ls1 := labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "l6CQ5y") + ls2 := labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "v7uDlF") + 
+ if ls1.Hash() != ls2.Hash() { + // These ones are the same when using -tags stringlabels + ls1 = labels.FromStrings("__name__", "metric", "lbl", "HFnEaGl") + ls2 = labels.FromStrings("__name__", "metric", "lbl", "RqcXatm") + } + + if ls1.Hash() != ls2.Hash() { + panic("This code needs to be updated: find new labels with colliding hash values.") + } + + return ls1, ls2 +} + +func TestStripeSeries_getOrSet(t *testing.T) { + lbls1, lbls2 := labelsWithHashCollision() + ms1 := memSeries{ + lset: lbls1, + } + ms2 := memSeries{ + lset: lbls2, + } + hash := lbls1.Hash() + s := newStripeSeries(1, noopSeriesLifecycleCallback{}) + + got, created, err := s.getOrSet(hash, lbls1, func() *memSeries { + return &ms1 + }) + require.NoError(t, err) + require.True(t, created) + require.Same(t, &ms1, got) + + // Add a conflicting series + got, created, err = s.getOrSet(hash, lbls2, func() *memSeries { + return &ms2 + }) + require.NoError(t, err) + require.True(t, created) + require.Same(t, &ms2, got) + + // Verify that we can get both of the series despite the hash collision + got = s.getByHash(hash, lbls1) + require.Same(t, &ms1, got) + got = s.getByHash(hash, lbls2) + require.Same(t, &ms2, got) +} From 5bee0cfce2813cc81ef43b33bdc51ea31214284e Mon Sep 17 00:00:00 2001 From: Fiona Liao Date: Tue, 28 Nov 2023 10:14:29 +0000 Subject: [PATCH 613/632] Change `ChunkReader.Chunk()` to `ChunkOrIterable()` The ChunkReader interface's Chunk() has been changed to ChunkOrIterable(). This is a precursor to OOO native histogram support - with OOO native histograms, the chunks.Meta passed to Chunk() can result in multiple chunks being returned rather than just a single chunk (e.g. if oooMergedChunk has a counter reset in the middle). To support this, ChunkOrIterable() requires either a single chunk or an iterable to be returned. If an iterable is returned, the caller has the responsibility of converting the samples from the iterable into possibly multiple chunks. 
The OOOHeadChunkReader now returns an iterable rather than a chunk to prepare for the native histograms case. Also as a beneficial side effect, oooMergedChunk and boundedChunk has been simplified as they only need to implement the Iterable interface now, not the full Chunk interface. --------- Signed-off-by: Fiona Liao Co-authored-by: George Krajcsovits --- cmd/promtool/tsdb.go | 8 +- storage/merge.go | 8 +- tsdb/block.go | 15 +- tsdb/block_test.go | 13 + tsdb/chunkenc/chunk.go | 27 ++- tsdb/chunks/chunks.go | 24 +- tsdb/chunks/chunks_test.go | 2 +- tsdb/compact_test.go | 40 ++++ tsdb/db_test.go | 6 +- tsdb/head_read.go | 99 +++----- tsdb/head_read_test.go | 15 +- tsdb/head_test.go | 22 +- tsdb/ooo_head_read.go | 16 +- tsdb/ooo_head_read_test.go | 13 +- tsdb/querier.go | 247 ++++++++++++++++--- tsdb/querier_test.go | 475 +++++++++++++++++++++++++++++++++---- tsdb/tsdbutil/histogram.go | 10 + 17 files changed, 819 insertions(+), 221 deletions(-) diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index 7b631000a..e6df9b78c 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -16,6 +16,7 @@ package main import ( "bufio" "context" + "errors" "fmt" "io" "os" @@ -643,10 +644,15 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb. for _, chk := range chks { // Load the actual data of the chunk. - chk, err := chunkr.Chunk(chk) + chk, iterable, err := chunkr.ChunkOrIterable(chk) if err != nil { return err } + // Chunks within blocks should not need to be re-written, so an + // iterable is not expected to be returned from the chunk reader. 
+ if iterable != nil { + return errors.New("ChunkOrIterable should not return an iterable when reading a block") + } switch chk.Encoding() { case chunkenc.EncXOR: floatChunkSamplesCount = append(floatChunkSamplesCount, chk.NumSamples()) diff --git a/storage/merge.go b/storage/merge.go index 501e8db09..b4ebb440f 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -473,10 +473,10 @@ func ChainSampleIteratorFromSeries(it chunkenc.Iterator, series []Series) chunke return csi } -func ChainSampleIteratorFromMetas(it chunkenc.Iterator, chunks []chunks.Meta) chunkenc.Iterator { - csi := getChainSampleIterator(it, len(chunks)) - for i, c := range chunks { - csi.iterators[i] = c.Chunk.Iterator(csi.iterators[i]) +func ChainSampleIteratorFromIterables(it chunkenc.Iterator, iterables []chunkenc.Iterable) chunkenc.Iterator { + csi := getChainSampleIterator(it, len(iterables)) + for i, c := range iterables { + csi.iterators[i] = c.Iterator(csi.iterators[i]) } return csi } diff --git a/tsdb/block.go b/tsdb/block.go index b995e4fd5..a586536b1 100644 --- a/tsdb/block.go +++ b/tsdb/block.go @@ -117,8 +117,19 @@ type ChunkWriter interface { // ChunkReader provides reading access of serialized time series data. type ChunkReader interface { - // Chunk returns the series data chunk with the given reference. - Chunk(meta chunks.Meta) (chunkenc.Chunk, error) + // ChunkOrIterable returns the series data for the given chunks.Meta. + // Either a single chunk will be returned, or an iterable. + // A single chunk should be returned if chunks.Meta maps to a chunk that + // already exists and doesn't need modifications. + // An iterable should be returned if chunks.Meta maps to a subset of the + // samples in a stored chunk, or multiple chunks. (E.g. OOOHeadChunkReader + // could return an iterable where multiple histogram samples have counter + // resets. There can only be one counter reset per histogram chunk so + // multiple chunks would be created from the iterable in this case.) 
+ // Only one of chunk or iterable should be returned. In some cases you may + // always expect a chunk to be returned. You can check that iterable is nil + // in those cases. + ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) // Close releases all underlying resources of the reader. Close() error diff --git a/tsdb/block_test.go b/tsdb/block_test.go index c49c1d5a8..46e6ecf84 100644 --- a/tsdb/block_test.go +++ b/tsdb/block_test.go @@ -501,6 +501,19 @@ func createBlockFromHead(tb testing.TB, dir string, head *Head) string { return filepath.Join(dir, ulid.String()) } +func createBlockFromOOOHead(tb testing.TB, dir string, head *OOOCompactionHead) string { + compactor, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{1000000}, nil, nil) + require.NoError(tb, err) + + require.NoError(tb, os.MkdirAll(dir, 0o777)) + + // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). + // Because of this block intervals are always +1 than the total samples it includes. + ulid, err := compactor.Write(dir, head, head.MinTime(), head.MaxTime()+1, nil) + require.NoError(tb, err) + return filepath.Join(dir, ulid.String()) +} + func createHead(tb testing.TB, w *wlog.WL, series []storage.Series, chunkDir string) *Head { opts := DefaultHeadOptions() opts.ChunkDirRoot = chunkDir diff --git a/tsdb/chunkenc/chunk.go b/tsdb/chunkenc/chunk.go index f4d11986c..0126f1fbd 100644 --- a/tsdb/chunkenc/chunk.go +++ b/tsdb/chunkenc/chunk.go @@ -67,6 +67,8 @@ const ( // Chunk holds a sequence of sample pairs that can be iterated over and appended to. type Chunk interface { + Iterable + // Bytes returns the underlying byte slice of the chunk. Bytes() []byte @@ -76,11 +78,6 @@ type Chunk interface { // Appender returns an appender to append samples to the chunk. Appender() (Appender, error) - // The iterator passed as argument is for re-use. 
- // Depending on implementation, the iterator can - // be re-used or a new iterator can be allocated. - Iterator(Iterator) Iterator - // NumSamples returns the number of samples in the chunk. NumSamples() int @@ -92,6 +89,13 @@ type Chunk interface { Compact() } +type Iterable interface { + // The iterator passed as argument is for re-use. + // Depending on implementation, the iterator can + // be re-used or a new iterator can be allocated. + Iterator(Iterator) Iterator +} + // Appender adds sample pairs to a chunk. type Appender interface { Append(int64, float64) @@ -184,6 +188,19 @@ func (v ValueType) ChunkEncoding() Encoding { } } +func (v ValueType) NewChunk() (Chunk, error) { + switch v { + case ValFloat: + return NewXORChunk(), nil + case ValHistogram: + return NewHistogramChunk(), nil + case ValFloatHistogram: + return NewFloatHistogramChunk(), nil + default: + return nil, fmt.Errorf("value type %v unsupported", v) + } +} + // MockSeriesIterator returns an iterator for a mock series with custom timeStamps and values. func MockSeriesIterator(timestamps []int64, values []float64) Iterator { return &mockSeriesIterator{ diff --git a/tsdb/chunks/chunks.go b/tsdb/chunks/chunks.go index c4c4e3c93..f22285a0c 100644 --- a/tsdb/chunks/chunks.go +++ b/tsdb/chunks/chunks.go @@ -117,11 +117,16 @@ func (b BlockChunkRef) Unpack() (int, int) { return sgmIndex, chkStart } -// Meta holds information about a chunk of data. +// Meta holds information about one or more chunks. +// For examples of when chunks.Meta could refer to multiple chunks, see +// ChunkReader.ChunkOrIterable(). type Meta struct { // Ref and Chunk hold either a reference that can be used to retrieve // chunk data or the data itself. - // If Chunk is nil, call ChunkReader.Chunk(Meta.Ref) to get the chunk and assign it to the Chunk field + // If Chunk is nil, call ChunkReader.ChunkOrIterable(Meta.Ref) to get the + // chunk and assign it to the Chunk field. 
If an iterable is returned from + // that method, then it may not be possible to set Chunk as the iterable + // might form several chunks. Ref ChunkRef Chunk chunkenc.Chunk @@ -667,24 +672,24 @@ func (s *Reader) Size() int64 { } // Chunk returns a chunk from a given reference. -func (s *Reader) Chunk(meta Meta) (chunkenc.Chunk, error) { +func (s *Reader) ChunkOrIterable(meta Meta) (chunkenc.Chunk, chunkenc.Iterable, error) { sgmIndex, chkStart := BlockChunkRef(meta.Ref).Unpack() if sgmIndex >= len(s.bs) { - return nil, fmt.Errorf("segment index %d out of range", sgmIndex) + return nil, nil, fmt.Errorf("segment index %d out of range", sgmIndex) } sgmBytes := s.bs[sgmIndex] if chkStart+MaxChunkLengthFieldSize > sgmBytes.Len() { - return nil, fmt.Errorf("segment doesn't include enough bytes to read the chunk size data field - required:%v, available:%v", chkStart+MaxChunkLengthFieldSize, sgmBytes.Len()) + return nil, nil, fmt.Errorf("segment doesn't include enough bytes to read the chunk size data field - required:%v, available:%v", chkStart+MaxChunkLengthFieldSize, sgmBytes.Len()) } // With the minimum chunk length this should never cause us reading // over the end of the slice. 
c := sgmBytes.Range(chkStart, chkStart+MaxChunkLengthFieldSize) chkDataLen, n := binary.Uvarint(c) if n <= 0 { - return nil, fmt.Errorf("reading chunk length failed with %d", n) + return nil, nil, fmt.Errorf("reading chunk length failed with %d", n) } chkEncStart := chkStart + n @@ -693,17 +698,18 @@ func (s *Reader) Chunk(meta Meta) (chunkenc.Chunk, error) { chkDataEnd := chkEnd - crc32.Size if chkEnd > sgmBytes.Len() { - return nil, fmt.Errorf("segment doesn't include enough bytes to read the chunk - required:%v, available:%v", chkEnd, sgmBytes.Len()) + return nil, nil, fmt.Errorf("segment doesn't include enough bytes to read the chunk - required:%v, available:%v", chkEnd, sgmBytes.Len()) } sum := sgmBytes.Range(chkDataEnd, chkEnd) if err := checkCRC32(sgmBytes.Range(chkEncStart, chkDataEnd), sum); err != nil { - return nil, err + return nil, nil, err } chkData := sgmBytes.Range(chkDataStart, chkDataEnd) chkEnc := sgmBytes.Range(chkEncStart, chkEncStart+ChunkEncodingSize)[0] - return s.pool.Get(chunkenc.Encoding(chkEnc), chkData) + chk, err := s.pool.Get(chunkenc.Encoding(chkEnc), chkData) + return chk, nil, err } func nextSequenceFile(dir string) (string, int, error) { diff --git a/tsdb/chunks/chunks_test.go b/tsdb/chunks/chunks_test.go index affaa4b9f..86f2fbddb 100644 --- a/tsdb/chunks/chunks_test.go +++ b/tsdb/chunks/chunks_test.go @@ -23,6 +23,6 @@ func TestReaderWithInvalidBuffer(t *testing.T) { b := realByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81}) r := &Reader{bs: []ByteSlice{b}} - _, err := r.Chunk(Meta{Ref: 0}) + _, _, err := r.ChunkOrIterable(Meta{Ref: 0}) require.Error(t, err) } diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index cf8c2439a..94b35e3b4 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -1144,6 +1144,46 @@ func BenchmarkCompactionFromHead(b *testing.B) { } } +func BenchmarkCompactionFromOOOHead(b *testing.B) { + dir := b.TempDir() + totalSeries := 100000 + totalSamples := 100 + for labelNames := 1; 
labelNames < totalSeries; labelNames *= 10 { + labelValues := totalSeries / labelNames + b.Run(fmt.Sprintf("labelnames=%d,labelvalues=%d", labelNames, labelValues), func(b *testing.B) { + chunkDir := b.TempDir() + opts := DefaultHeadOptions() + opts.ChunkRange = 1000 + opts.ChunkDirRoot = chunkDir + opts.OutOfOrderTimeWindow.Store(int64(totalSamples)) + h, err := NewHead(nil, nil, nil, nil, opts, nil) + require.NoError(b, err) + for ln := 0; ln < labelNames; ln++ { + app := h.Appender(context.Background()) + for lv := 0; lv < labelValues; lv++ { + lbls := labels.FromStrings(fmt.Sprintf("%d", ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln)) + _, err = app.Append(0, lbls, int64(totalSamples), 0) + require.NoError(b, err) + for ts := 0; ts < totalSamples; ts++ { + _, err = app.Append(0, lbls, int64(ts), float64(ts)) + require.NoError(b, err) + } + } + require.NoError(b, app.Commit()) + } + + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + oooHead, err := NewOOOCompactionHead(context.TODO(), h) + require.NoError(b, err) + createBlockFromOOOHead(b, filepath.Join(dir, fmt.Sprintf("%d-%d", i, labelNames)), oooHead) + } + h.Close() + }) + } +} + // TestDisableAutoCompactions checks that we can // disable and enable the auto compaction. 
// This is needed for unit tests that rely on diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 5728b49bd..f602f5ee9 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -2909,8 +2909,9 @@ func TestChunkWriter_ReadAfterWrite(t *testing.T) { for _, chks := range test.chks { for _, chkExp := range chks { - chkAct, err := r.Chunk(chkExp) + chkAct, iterable, err := r.ChunkOrIterable(chkExp) require.NoError(t, err) + require.Nil(t, iterable) require.Equal(t, chkExp.Chunk.Bytes(), chkAct.Bytes()) } } @@ -2969,8 +2970,9 @@ func TestChunkReader_ConcurrentReads(t *testing.T) { go func(chunk chunks.Meta) { defer wg.Done() - chkAct, err := r.Chunk(chunk) + chkAct, iterable, err := r.ChunkOrIterable(chunk) require.NoError(t, err) + require.Nil(t, iterable) require.Equal(t, chunk.Chunk.Bytes(), chkAct.Bytes()) }(chk) } diff --git a/tsdb/head_read.go b/tsdb/head_read.go index 32a23499a..35ef26a58 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -289,10 +289,10 @@ func (h *headChunkReader) Close() error { return nil } -// Chunk returns the chunk for the reference number. -func (h *headChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) { +// ChunkOrIterable returns the chunk for the reference number. +func (h *headChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) { chk, _, err := h.chunk(meta, false) - return chk, err + return chk, nil, err } // ChunkWithCopy returns the chunk for the reference number. @@ -416,13 +416,13 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi return elem, true, offset == 0, nil } -// oooMergedChunk returns the requested chunk based on the given chunks.Meta -// reference from memory or by m-mapping it from the disk. The returned chunk -// might be a merge of all the overlapping chunks, if any, amongst all the -// chunks in the OOOHead. 
+// oooMergedChunks return an iterable over one or more OOO chunks for the given +// chunks.Meta reference from memory or by m-mapping it from the disk. The +// returned iterable will be a merge of all the overlapping chunks, if any, +// amongst all the chunks in the OOOHead. // This function is not thread safe unless the caller holds a lock. // The caller must ensure that s.ooo is not nil. -func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, mint, maxt int64) (chunk *mergedOOOChunks, err error) { +func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, mint, maxt int64) (*mergedOOOChunks, error) { _, cid := chunks.HeadChunkRef(meta.Ref).Unpack() // ix represents the index of chunk in the s.mmappedChunks slice. The chunk meta's are @@ -499,11 +499,13 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper mc := &mergedOOOChunks{} absoluteMax := int64(math.MinInt64) for _, c := range tmpChks { - if c.meta.Ref != meta.Ref && (len(mc.chunks) == 0 || c.meta.MinTime > absoluteMax) { + if c.meta.Ref != meta.Ref && (len(mc.chunkIterables) == 0 || c.meta.MinTime > absoluteMax) { continue } + var iterable chunkenc.Iterable if c.meta.Ref == oooHeadRef { var xor *chunkenc.XORChunk + var err error // If head chunk min and max time match the meta OOO markers // that means that the chunk has not expanded so we can append // it as it is. @@ -516,7 +518,7 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper if err != nil { return nil, errors.Wrap(err, "failed to convert ooo head chunk to xor chunk") } - c.meta.Chunk = xor + iterable = xor } else { chk, err := cdm.Chunk(c.ref) if err != nil { @@ -531,12 +533,12 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper // wrap the chunk within a chunk that doesnt allows us to iterate // through samples out of the OOOLastMinT and OOOLastMaxT // markers. 
- c.meta.Chunk = boundedChunk{chk, meta.OOOLastMinTime, meta.OOOLastMaxTime} + iterable = boundedIterable{chk, meta.OOOLastMinTime, meta.OOOLastMaxTime} } else { - c.meta.Chunk = chk + iterable = chk } } - mc.chunks = append(mc.chunks, c.meta) + mc.chunkIterables = append(mc.chunkIterables, iterable) if c.meta.MaxTime > absoluteMax { absoluteMax = c.meta.MaxTime } @@ -545,77 +547,30 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper return mc, nil } -var _ chunkenc.Chunk = &mergedOOOChunks{} +var _ chunkenc.Iterable = &mergedOOOChunks{} -// mergedOOOChunks holds the list of overlapping chunks. This struct satisfies -// chunkenc.Chunk. +// mergedOOOChunks holds the list of iterables for overlapping chunks. type mergedOOOChunks struct { - chunks []chunks.Meta -} - -// Bytes is a very expensive method because its calling the iterator of all the -// chunks in the mergedOOOChunk and building a new chunk with the samples. -func (o mergedOOOChunks) Bytes() []byte { - xc := chunkenc.NewXORChunk() - app, err := xc.Appender() - if err != nil { - panic(err) - } - it := o.Iterator(nil) - for it.Next() == chunkenc.ValFloat { - t, v := it.At() - app.Append(t, v) - } - - return xc.Bytes() -} - -func (o mergedOOOChunks) Encoding() chunkenc.Encoding { - return chunkenc.EncXOR -} - -func (o mergedOOOChunks) Appender() (chunkenc.Appender, error) { - return nil, errors.New("can't append to mergedOOOChunks") + chunkIterables []chunkenc.Iterable } func (o mergedOOOChunks) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator { - return storage.ChainSampleIteratorFromMetas(iterator, o.chunks) + return storage.ChainSampleIteratorFromIterables(iterator, o.chunkIterables) } -func (o mergedOOOChunks) NumSamples() int { - samples := 0 - for _, c := range o.chunks { - samples += c.Chunk.NumSamples() - } - return samples -} +var _ chunkenc.Iterable = &boundedIterable{} -func (o mergedOOOChunks) Compact() {} - -var _ chunkenc.Chunk = &boundedChunk{} - -// 
boundedChunk is an implementation of chunkenc.Chunk that uses a +// boundedIterable is an implementation of chunkenc.Iterable that uses a // boundedIterator that only iterates through samples which timestamps are // >= minT and <= maxT. -type boundedChunk struct { - chunkenc.Chunk - minT int64 - maxT int64 +type boundedIterable struct { + chunk chunkenc.Chunk + minT int64 + maxT int64 } -func (b boundedChunk) Bytes() []byte { - xor := chunkenc.NewXORChunk() - a, _ := xor.Appender() - it := b.Iterator(nil) - for it.Next() == chunkenc.ValFloat { - t, v := it.At() - a.Append(t, v) - } - return xor.Bytes() -} - -func (b boundedChunk) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator { - it := b.Chunk.Iterator(iterator) +func (b boundedIterable) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator { + it := b.chunk.Iterator(iterator) if it == nil { panic("iterator shouldn't be nil") } diff --git a/tsdb/head_read_test.go b/tsdb/head_read_test.go index ad0a59d34..4f19db4c1 100644 --- a/tsdb/head_read_test.go +++ b/tsdb/head_read_test.go @@ -129,21 +129,10 @@ func TestBoundedChunk(t *testing.T) { } for _, tc := range tests { t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) { - chunk := boundedChunk{tc.inputChunk, tc.inputMinT, tc.inputMaxT} - - // Testing Bytes() - expChunk := chunkenc.NewXORChunk() - if tc.inputChunk.NumSamples() > 0 { - app, err := expChunk.Appender() - require.NoError(t, err) - for ts := tc.inputMinT; ts <= tc.inputMaxT; ts++ { - app.Append(ts, float64(ts)) - } - } - require.Equal(t, expChunk.Bytes(), chunk.Bytes()) + iterable := boundedIterable{tc.inputChunk, tc.inputMinT, tc.inputMaxT} var samples []sample - it := chunk.Iterator(nil) + it := iterable.Iterator(nil) if tc.initialSeek != 0 { // Testing Seek() diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 64237e76a..7adfab1c9 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -1835,16 +1835,16 @@ func TestGCChunkAccess(t *testing.T) { cr, err := h.chunksRange(0, 1500, 
nil) require.NoError(t, err) - _, err = cr.Chunk(chunks[0]) + _, _, err = cr.ChunkOrIterable(chunks[0]) require.NoError(t, err) - _, err = cr.Chunk(chunks[1]) + _, _, err = cr.ChunkOrIterable(chunks[1]) require.NoError(t, err) require.NoError(t, h.Truncate(1500)) // Remove a chunk. - _, err = cr.Chunk(chunks[0]) + _, _, err = cr.ChunkOrIterable(chunks[0]) require.Equal(t, storage.ErrNotFound, err) - _, err = cr.Chunk(chunks[1]) + _, _, err = cr.ChunkOrIterable(chunks[1]) require.NoError(t, err) } @@ -1894,18 +1894,18 @@ func TestGCSeriesAccess(t *testing.T) { cr, err := h.chunksRange(0, 2000, nil) require.NoError(t, err) - _, err = cr.Chunk(chunks[0]) + _, _, err = cr.ChunkOrIterable(chunks[0]) require.NoError(t, err) - _, err = cr.Chunk(chunks[1]) + _, _, err = cr.ChunkOrIterable(chunks[1]) require.NoError(t, err) require.NoError(t, h.Truncate(2000)) // Remove the series. require.Equal(t, (*memSeries)(nil), h.series.getByID(1)) - _, err = cr.Chunk(chunks[0]) + _, _, err = cr.ChunkOrIterable(chunks[0]) require.Equal(t, storage.ErrNotFound, err) - _, err = cr.Chunk(chunks[1]) + _, _, err = cr.ChunkOrIterable(chunks[1]) require.Equal(t, storage.ErrNotFound, err) } @@ -5406,8 +5406,9 @@ func TestCuttingNewHeadChunks(t *testing.T) { require.Len(t, chkMetas, len(tc.expectedChks)) for i, expected := range tc.expectedChks { - chk, err := chkReader.Chunk(chkMetas[i]) + chk, iterable, err := chkReader.ChunkOrIterable(chkMetas[i]) require.NoError(t, err) + require.Nil(t, iterable) require.Equal(t, expected.numSamples, chk.NumSamples()) require.Len(t, chk.Bytes(), expected.numBytes) @@ -5455,8 +5456,9 @@ func TestHeadDetectsDuplicateSampleAtSizeLimit(t *testing.T) { storedSampleCount := 0 for _, chunkMeta := range chunks { - chunk, err := chunkReader.Chunk(chunkMeta) + chunk, iterable, err := chunkReader.ChunkOrIterable(chunkMeta) require.NoError(t, err) + require.Nil(t, iterable) storedSampleCount += chunk.NumSamples() } diff --git a/tsdb/ooo_head_read.go 
b/tsdb/ooo_head_read.go index ace232657..440130f7d 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -247,33 +247,33 @@ func NewOOOHeadChunkReader(head *Head, mint, maxt int64, isoState *oooIsolationS } } -func (cr OOOHeadChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) { +func (cr OOOHeadChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) { sid, _ := chunks.HeadChunkRef(meta.Ref).Unpack() s := cr.head.series.getByID(sid) // This means that the series has been garbage collected. if s == nil { - return nil, storage.ErrNotFound + return nil, nil, storage.ErrNotFound } s.Lock() if s.ooo == nil { // There is no OOO data for this series. s.Unlock() - return nil, storage.ErrNotFound + return nil, nil, storage.ErrNotFound } - c, err := s.oooMergedChunk(meta, cr.head.chunkDiskMapper, cr.mint, cr.maxt) + mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, cr.mint, cr.maxt) s.Unlock() if err != nil { - return nil, err + return nil, nil, err } // This means that the query range did not overlap with the requested chunk. 
- if len(c.chunks) == 0 { - return nil, storage.ErrNotFound + if len(mc.chunkIterables) == 0 { + return nil, nil, storage.ErrNotFound } - return c, nil + return nil, mc, nil } func (cr OOOHeadChunkReader) Close() error { diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index 3f4b9bae7..64577872f 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -486,9 +486,10 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { cr := NewOOOHeadChunkReader(db.head, 0, 1000, nil) defer cr.Close() - c, err := cr.Chunk(chunks.Meta{ + c, iterable, err := cr.ChunkOrIterable(chunks.Meta{ Ref: 0x1000000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300, }) + require.Nil(t, iterable) require.Equal(t, err, fmt.Errorf("not found")) require.Equal(t, c, nil) }) @@ -853,11 +854,12 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil) defer cr.Close() for i := 0; i < len(chks); i++ { - c, err := cr.Chunk(chks[i]) + c, iterable, err := cr.ChunkOrIterable(chks[i]) require.NoError(t, err) + require.Nil(t, c) var resultSamples chunks.SampleSlice - it := c.Iterator(nil) + it := iterable.Iterator(nil) for it.Next() == chunkenc.ValFloat { t, v := it.At() resultSamples = append(resultSamples, sample{t: t, f: v}) @@ -1025,11 +1027,12 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil) defer cr.Close() for i := 0; i < len(chks); i++ { - c, err := cr.Chunk(chks[i]) + c, iterable, err := cr.ChunkOrIterable(chks[i]) require.NoError(t, err) + require.Nil(t, c) var resultSamples chunks.SampleSlice - it := c.Iterator(nil) + it := iterable.Iterator(nil) for it.Next() == chunkenc.ValFloat { ts, v := it.At() resultSamples = append(resultSamples, sample{t: ts, f: v}) diff --git a/tsdb/querier.go b/tsdb/querier.go index a832c0d1b..a32c7dd96 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ 
-632,36 +632,42 @@ func (b *blockBaseSeriesSet) Warnings() annotations.Annotations { return nil } // populateWithDelGenericSeriesIterator assumes that chunks that would be fully // removed by intervals are filtered out in previous phase. // -// On each iteration currChkMeta is available. If currDelIter is not nil, it -// means that the chunk iterator in currChkMeta is invalid and a chunk rewrite -// is needed, for which currDelIter should be used. +// On each iteration currMeta is available. If currDelIter is not nil, it +// means that the chunk in currMeta is invalid and a chunk rewrite is needed, +// for which currDelIter should be used. type populateWithDelGenericSeriesIterator struct { blockID ulid.ULID - chunks ChunkReader - // chks are expected to be sorted by minTime and should be related to + cr ChunkReader + // metas are expected to be sorted by minTime and should be related to // the same, single series. - chks []chunks.Meta + // It's possible for a single chunks.Meta to refer to multiple chunks. + // cr.ChunkOrIterator() would return an iterable and a nil chunk in this + // case. + metas []chunks.Meta - i int // Index into chks; -1 if not started yet. + i int // Index into metas; -1 if not started yet. err error bufIter DeletedIterator // Retained for memory re-use. currDelIter may point here. intervals tombstones.Intervals currDelIter chunkenc.Iterator - currChkMeta chunks.Meta + // currMeta is the current chunks.Meta from metas. currMeta.Chunk is set to + // the chunk returned from cr.ChunkOrIterable(). As that can return a nil + // chunk, currMeta.Chunk is not always guaranteed to be set. + currMeta chunks.Meta } func (p *populateWithDelGenericSeriesIterator) reset(blockID ulid.ULID, cr ChunkReader, chks []chunks.Meta, intervals tombstones.Intervals) { p.blockID = blockID - p.chunks = cr - p.chks = chks + p.cr = cr + p.metas = chks p.i = -1 p.err = nil // Note we don't touch p.bufIter.Iter; it is holding on to an iterator we might reuse in next(). 
p.bufIter.Intervals = p.bufIter.Intervals[:0] p.intervals = intervals p.currDelIter = nil - p.currChkMeta = chunks.Meta{} + p.currMeta = chunks.Meta{} } // If copyHeadChunk is true, then the head chunk (i.e. the in-memory chunk of the TSDB) @@ -669,43 +675,54 @@ func (p *populateWithDelGenericSeriesIterator) reset(blockID ulid.ULID, cr Chunk // However, if the deletion intervals overlaps with the head chunk, then the head chunk is // not copied irrespective of copyHeadChunk because it will be re-encoded later anyway. func (p *populateWithDelGenericSeriesIterator) next(copyHeadChunk bool) bool { - if p.err != nil || p.i >= len(p.chks)-1 { + if p.err != nil || p.i >= len(p.metas)-1 { return false } p.i++ - p.currChkMeta = p.chks[p.i] + p.currMeta = p.metas[p.i] p.bufIter.Intervals = p.bufIter.Intervals[:0] for _, interval := range p.intervals { - if p.currChkMeta.OverlapsClosedInterval(interval.Mint, interval.Maxt) { + if p.currMeta.OverlapsClosedInterval(interval.Mint, interval.Maxt) { p.bufIter.Intervals = p.bufIter.Intervals.Add(interval) } } - hcr, ok := p.chunks.(*headChunkReader) + hcr, ok := p.cr.(*headChunkReader) + var iterable chunkenc.Iterable if ok && copyHeadChunk && len(p.bufIter.Intervals) == 0 { // ChunkWithCopy will copy the head chunk. var maxt int64 - p.currChkMeta.Chunk, maxt, p.err = hcr.ChunkWithCopy(p.currChkMeta) + p.currMeta.Chunk, maxt, p.err = hcr.ChunkWithCopy(p.currMeta) // For the in-memory head chunk the index reader sets maxt as MaxInt64. We fix it here. 
- p.currChkMeta.MaxTime = maxt + p.currMeta.MaxTime = maxt } else { - p.currChkMeta.Chunk, p.err = p.chunks.Chunk(p.currChkMeta) + p.currMeta.Chunk, iterable, p.err = p.cr.ChunkOrIterable(p.currMeta) } + if p.err != nil { - p.err = errors.Wrapf(p.err, "cannot populate chunk %d from block %s", p.currChkMeta.Ref, p.blockID.String()) + p.err = errors.Wrapf(p.err, "cannot populate chunk %d from block %s", p.currMeta.Ref, p.blockID.String()) return false } - if len(p.bufIter.Intervals) == 0 { - // If there is no overlap with deletion intervals, we can take chunk as it is. - p.currDelIter = nil + // Use the single chunk if possible. + if p.currMeta.Chunk != nil { + if len(p.bufIter.Intervals) == 0 { + // If there is no overlap with deletion intervals and a single chunk is + // returned, we can take chunk as it is. + p.currDelIter = nil + return true + } + // Otherwise we need to iterate over the samples in the single chunk + // and create new chunks. + p.bufIter.Iter = p.currMeta.Chunk.Iterator(p.bufIter.Iter) + p.currDelIter = &p.bufIter return true } - // We don't want the full chunk, take just a part of it. - p.bufIter.Iter = p.currChkMeta.Chunk.Iterator(p.bufIter.Iter) + // Otherwise, use the iterable to create an iterator. + p.bufIter.Iter = iterable.Iterator(p.bufIter.Iter) p.currDelIter = &p.bufIter return true } @@ -765,7 +782,7 @@ func (p *populateWithDelSeriesIterator) Next() chunkenc.ValueType { if p.currDelIter != nil { p.curr = p.currDelIter } else { - p.curr = p.currChkMeta.Chunk.Iterator(p.curr) + p.curr = p.currMeta.Chunk.Iterator(p.curr) } if valueType := p.curr.Next(); valueType != chunkenc.ValNone { return valueType @@ -817,22 +834,61 @@ func (p *populateWithDelSeriesIterator) Err() error { type populateWithDelChunkSeriesIterator struct { populateWithDelGenericSeriesIterator - curr chunks.Meta + // currMetaWithChunk is current meta with its chunk field set. This meta + // is guaranteed to map to a single chunk. 
This differs from + // populateWithDelGenericSeriesIterator.currMeta as that + // could refer to multiple chunks. + currMetaWithChunk chunks.Meta + + // chunksFromIterable stores the chunks created from iterating through + // the iterable returned by cr.ChunkOrIterable() (with deleted samples + // removed). + chunksFromIterable []chunks.Meta + chunksFromIterableIdx int } func (p *populateWithDelChunkSeriesIterator) reset(blockID ulid.ULID, cr ChunkReader, chks []chunks.Meta, intervals tombstones.Intervals) { p.populateWithDelGenericSeriesIterator.reset(blockID, cr, chks, intervals) - p.curr = chunks.Meta{} + p.currMetaWithChunk = chunks.Meta{} + p.chunksFromIterable = p.chunksFromIterable[:0] + p.chunksFromIterableIdx = -1 } func (p *populateWithDelChunkSeriesIterator) Next() bool { + if p.currMeta.Chunk == nil { + // If we've been creating chunks from the iterable, check if there are + // any more chunks to iterate through. + if p.chunksFromIterableIdx < len(p.chunksFromIterable)-1 { + p.chunksFromIterableIdx++ + p.currMetaWithChunk = p.chunksFromIterable[p.chunksFromIterableIdx] + return true + } + } + + // Move to the next chunk/deletion iterator. if !p.next(true) { return false } - p.curr = p.currChkMeta - if p.currDelIter == nil { - return true + + if p.currMeta.Chunk != nil { + if p.currDelIter == nil { + p.currMetaWithChunk = p.currMeta + return true + } + // If ChunkOrIterable() returned a non-nil chunk, the samples in + // p.currDelIter will only form one chunk, as the only change + // p.currDelIter might make is deleting some samples. + return p.populateCurrForSingleChunk() } + + // If ChunkOrIterable() returned an iterable, multiple chunks may be + // created from the samples in p.currDelIter. + return p.populateChunksFromIterable() +} + +// populateCurrForSingleChunk sets the fields within p.currMetaWithChunk. This +// should be called if the samples in p.currDelIter only form one chunk. 
+func (p *populateWithDelChunkSeriesIterator) populateCurrForSingleChunk() bool { valueType := p.currDelIter.Next() if valueType == chunkenc.ValNone { if err := p.currDelIter.Err(); err != nil { @@ -840,9 +896,9 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool { } return false } - p.curr.MinTime = p.currDelIter.AtT() + p.currMetaWithChunk.MinTime = p.currDelIter.AtT() - // Re-encode the chunk if iterator is provider. This means that it has + // Re-encode the chunk if iterator is provided. This means that it has // some samples to be deleted or chunk is opened. var ( newChunk chunkenc.Chunk @@ -900,7 +956,7 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool { } } default: - err = fmt.Errorf("populateWithDelChunkSeriesIterator: value type %v unsupported", valueType) + err = fmt.Errorf("populateCurrForSingleChunk: value type %v unsupported", valueType) } if err != nil { @@ -912,12 +968,127 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool { return false } - p.curr.Chunk = newChunk - p.curr.MaxTime = t + p.currMetaWithChunk.Chunk = newChunk + p.currMetaWithChunk.MaxTime = t return true } -func (p *populateWithDelChunkSeriesIterator) At() chunks.Meta { return p.curr } +// populateChunksFromIterable reads the samples from currDelIter to create +// chunks for chunksFromIterable. It also sets p.currMetaWithChunk to the first +// chunk. +func (p *populateWithDelChunkSeriesIterator) populateChunksFromIterable() bool { + p.chunksFromIterable = p.chunksFromIterable[:0] + p.chunksFromIterableIdx = -1 + + firstValueType := p.currDelIter.Next() + if firstValueType == chunkenc.ValNone { + if err := p.currDelIter.Err(); err != nil { + p.err = errors.Wrap(err, "populateChunksFromIterable: no samples could be read") + return false + } + return false + } + + var ( + // t is the timestamp for the current sample. 
+ t int64 + cmint int64 + cmaxt int64 + + currentChunk chunkenc.Chunk + + app chunkenc.Appender + + newChunk chunkenc.Chunk + recoded bool + + err error + ) + + prevValueType := chunkenc.ValNone + + for currentValueType := firstValueType; currentValueType != chunkenc.ValNone; currentValueType = p.currDelIter.Next() { + // Check if the encoding has changed (i.e. we need to create a new + // chunk as chunks can't have multiple encoding types). + // For the first sample, the following condition will always be true as + // ValNoneNone != ValFloat | ValHistogram | ValFloatHistogram. + if currentValueType != prevValueType { + if prevValueType != chunkenc.ValNone { + p.chunksFromIterable = append(p.chunksFromIterable, chunks.Meta{Chunk: currentChunk, MinTime: cmint, MaxTime: cmaxt}) + } + cmint = p.currDelIter.AtT() + if currentChunk, err = currentValueType.NewChunk(); err != nil { + break + } + if app, err = currentChunk.Appender(); err != nil { + break + } + } + + switch currentValueType { + case chunkenc.ValFloat: + { + var v float64 + t, v = p.currDelIter.At() + app.Append(t, v) + } + case chunkenc.ValHistogram: + { + var v *histogram.Histogram + t, v = p.currDelIter.AtHistogram() + // No need to set prevApp as AppendHistogram will set the + // counter reset header for the appender that's returned. + newChunk, recoded, app, err = app.AppendHistogram(nil, t, v, false) + } + case chunkenc.ValFloatHistogram: + { + var v *histogram.FloatHistogram + t, v = p.currDelIter.AtFloatHistogram() + // No need to set prevApp as AppendHistogram will set the + // counter reset header for the appender that's returned. 
+ newChunk, recoded, app, err = app.AppendFloatHistogram(nil, t, v, false) + } + } + + if err != nil { + break + } + + if newChunk != nil { + if !recoded { + p.chunksFromIterable = append(p.chunksFromIterable, chunks.Meta{Chunk: currentChunk, MinTime: cmint, MaxTime: cmaxt}) + } + currentChunk = newChunk + cmint = t + } + + cmaxt = t + prevValueType = currentValueType + } + + if err != nil { + p.err = errors.Wrap(err, "populateChunksFromIterable: error when writing new chunks") + return false + } + if err = p.currDelIter.Err(); err != nil { + p.err = errors.Wrap(err, "populateChunksFromIterable: currDelIter error when writing new chunks") + return false + } + + if prevValueType != chunkenc.ValNone { + p.chunksFromIterable = append(p.chunksFromIterable, chunks.Meta{Chunk: currentChunk, MinTime: cmint, MaxTime: cmaxt}) + } + + if len(p.chunksFromIterable) == 0 { + return false + } + + p.currMetaWithChunk = p.chunksFromIterable[0] + p.chunksFromIterableIdx = 0 + return true +} + +func (p *populateWithDelChunkSeriesIterator) At() chunks.Meta { return p.currMetaWithChunk } // blockSeriesSet allows to iterate over sorted, populated series with applied tombstones. // Series with all deleted chunks are still present as Series with no samples. 
@@ -1117,8 +1288,8 @@ func newNopChunkReader() ChunkReader { } } -func (cr nopChunkReader) Chunk(chunks.Meta) (chunkenc.Chunk, error) { - return cr.emptyChunk, nil +func (cr nopChunkReader) ChunkOrIterable(chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) { + return cr.emptyChunk, nil, nil } func (cr nopChunkReader) Close() error { return nil } diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 7260d9d8b..ce46a5c12 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -685,12 +685,14 @@ func TestBlockQuerierDelete(t *testing.T) { type fakeChunksReader struct { ChunkReader - chks map[chunks.ChunkRef]chunkenc.Chunk + chks map[chunks.ChunkRef]chunkenc.Chunk + iterables map[chunks.ChunkRef]chunkenc.Iterable } func createFakeReaderAndNotPopulatedChunks(s ...[]chunks.Sample) (*fakeChunksReader, []chunks.Meta) { f := &fakeChunksReader{ - chks: map[chunks.ChunkRef]chunkenc.Chunk{}, + chks: map[chunks.ChunkRef]chunkenc.Chunk{}, + iterables: map[chunks.ChunkRef]chunkenc.Iterable{}, } chks := make([]chunks.Meta, 0, len(s)) @@ -707,21 +709,102 @@ func createFakeReaderAndNotPopulatedChunks(s ...[]chunks.Sample) (*fakeChunksRea return f, chks } -func (r *fakeChunksReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) { - chk, ok := r.chks[meta.Ref] - if !ok { - return nil, fmt.Errorf("chunk not found at ref %v", meta.Ref) +// Samples in each slice are assumed to be sorted. 
+func createFakeReaderAndIterables(s ...[]chunks.Sample) (*fakeChunksReader, []chunks.Meta) { + f := &fakeChunksReader{ + chks: map[chunks.ChunkRef]chunkenc.Chunk{}, + iterables: map[chunks.ChunkRef]chunkenc.Iterable{}, } - return chk, nil + chks := make([]chunks.Meta, 0, len(s)) + + for ref, samples := range s { + f.iterables[chunks.ChunkRef(ref)] = &mockIterable{s: samples} + + var minTime, maxTime int64 + if len(samples) > 0 { + minTime = samples[0].T() + maxTime = samples[len(samples)-1].T() + } + chks = append(chks, chunks.Meta{ + Ref: chunks.ChunkRef(ref), + MinTime: minTime, + MaxTime: maxTime, + }) + } + return f, chks } +func (r *fakeChunksReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) { + if chk, ok := r.chks[meta.Ref]; ok { + return chk, nil, nil + } + + if it, ok := r.iterables[meta.Ref]; ok { + return nil, it, nil + } + return nil, nil, fmt.Errorf("chunk or iterable not found at ref %v", meta.Ref) +} + +type mockIterable struct { + s []chunks.Sample +} + +func (it *mockIterable) Iterator(chunkenc.Iterator) chunkenc.Iterator { + return &mockSampleIterator{ + s: it.s, + idx: -1, + } +} + +type mockSampleIterator struct { + s []chunks.Sample + idx int +} + +func (it *mockSampleIterator) Seek(t int64) chunkenc.ValueType { + for ; it.idx < len(it.s); it.idx++ { + if it.idx != -1 && it.s[it.idx].T() >= t { + return it.s[it.idx].Type() + } + } + + return chunkenc.ValNone +} + +func (it *mockSampleIterator) At() (int64, float64) { + return it.s[it.idx].T(), it.s[it.idx].F() +} + +func (it *mockSampleIterator) AtHistogram() (int64, *histogram.Histogram) { + return it.s[it.idx].T(), it.s[it.idx].H() +} + +func (it *mockSampleIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { + return it.s[it.idx].T(), it.s[it.idx].FH() +} + +func (it *mockSampleIterator) AtT() int64 { + return it.s[it.idx].T() +} + +func (it *mockSampleIterator) Next() chunkenc.ValueType { + if it.idx < len(it.s)-1 { + it.idx++ + return 
it.s[it.idx].Type() + } + + return chunkenc.ValNone +} + +func (it *mockSampleIterator) Err() error { return nil } + func TestPopulateWithTombSeriesIterators(t *testing.T) { type minMaxTimes struct { minTime, maxTime int64 } cases := []struct { - name string - chks [][]chunks.Sample + name string + samples [][]chunks.Sample expected []chunks.Sample expectedChks []chunks.Meta @@ -732,23 +815,38 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { // Seek being zero means do not test seek. seek int64 seekSuccess bool + + // Set this to true if a sample slice will form multiple chunks. + skipChunkTest bool + + skipIterableTest bool }{ { - name: "no chunk", - chks: [][]chunks.Sample{}, + name: "no chunk", + samples: [][]chunks.Sample{}, }, { - name: "one empty chunk", // This should never happen. - chks: [][]chunks.Sample{{}}, + name: "one empty chunk", // This should never happen. + samples: [][]chunks.Sample{{}}, expectedChks: []chunks.Meta{ assureChunkFromSamples(t, []chunks.Sample{}), }, expectedMinMaxTimes: []minMaxTimes{{0, 0}}, + // iterables with no samples will return no chunks instead of empty chunks + skipIterableTest: true, }, { - name: "three empty chunks", // This should never happen. - chks: [][]chunks.Sample{{}, {}, {}}, + name: "one empty iterable", + samples: [][]chunks.Sample{{}}, + + // iterables with no samples will return no chunks + expectedChks: nil, + skipChunkTest: true, + }, + { + name: "three empty chunks", // This should never happen. 
+ samples: [][]chunks.Sample{{}, {}, {}}, expectedChks: []chunks.Meta{ assureChunkFromSamples(t, []chunks.Sample{}), @@ -756,10 +854,20 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { assureChunkFromSamples(t, []chunks.Sample{}), }, expectedMinMaxTimes: []minMaxTimes{{0, 0}, {0, 0}, {0, 0}}, + // iterables with no samples will return no chunks instead of empty chunks + skipIterableTest: true, + }, + { + name: "three empty iterables", + samples: [][]chunks.Sample{{}, {}, {}}, + + // iterables with no samples will return no chunks + expectedChks: nil, + skipChunkTest: true, }, { name: "one chunk", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, }, @@ -775,7 +883,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "two full chunks", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}}, }, @@ -795,7 +903,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "three full chunks", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}}, {sample{10, 22, nil, nil}, sample{203, 3493, nil, nil}}, @@ -819,15 +927,15 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, // Seek cases. { - name: "three empty chunks and seek", // This should never happen. - chks: [][]chunks.Sample{{}, {}, {}}, - seek: 1, + name: "three empty chunks and seek", // This should never happen. 
+ samples: [][]chunks.Sample{{}, {}, {}}, + seek: 1, seekSuccess: false, }, { name: "two chunks and seek beyond chunks", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ {sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}}, }, @@ -837,7 +945,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "two chunks and seek on middle of first chunk", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ {sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}}, }, @@ -850,7 +958,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "two chunks and seek before first chunk", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ {sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}}, }, @@ -864,12 +972,12 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { // Deletion / Trim cases. 
{ name: "no chunk with deletion interval", - chks: [][]chunks.Sample{}, + samples: [][]chunks.Sample{}, intervals: tombstones.Intervals{{Mint: 20, Maxt: 21}}, }, { name: "two chunks with trimmed first and last samples from edge chunks", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}}, }, @@ -890,7 +998,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "two chunks with trimmed middle sample of first chunk", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}}, }, @@ -911,7 +1019,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "two chunks with deletion across two chunks", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}}, }, @@ -933,7 +1041,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { // Deletion with seek. 
{ name: "two chunks with trimmed first and last samples from edge chunks, seek from middle of first chunk", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}}, }, @@ -945,9 +1053,20 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, }, }, + { + name: "one chunk where all samples are trimmed", + samples: [][]chunks.Sample{ + {sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, + {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}}, + }, + intervals: tombstones.Intervals{{Mint: math.MinInt64, Maxt: 3}}.Add(tombstones.Interval{Mint: 4, Maxt: math.MaxInt64}), + + expected: nil, + expectedChks: nil, + }, { name: "one histogram chunk", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ { sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil}, @@ -973,7 +1092,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "one histogram chunk intersect with earlier deletion interval", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ { sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil}, @@ -996,7 +1115,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "one histogram chunk intersect with later deletion interval", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ { sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil}, @@ -1021,7 +1140,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "one float histogram chunk", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ { sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, @@ 
-1047,7 +1166,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "one float histogram chunk intersect with earlier deletion interval", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ { sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, @@ -1070,7 +1189,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "one float histogram chunk intersect with later deletion interval", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ { sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, @@ -1095,7 +1214,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "one gauge histogram chunk", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ { sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil}, sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil}, @@ -1121,7 +1240,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "one gauge histogram chunk intersect with earlier deletion interval", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ { sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil}, sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil}, @@ -1144,7 +1263,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "one gauge histogram chunk intersect with later deletion interval", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ { sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil}, sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil}, @@ -1169,7 +1288,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "one gauge float histogram", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ { sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)}, @@ -1195,7 +1314,7 @@ func 
TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "one gauge float histogram chunk intersect with earlier deletion interval", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ { sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)}, @@ -1218,7 +1337,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "one gauge float histogram chunk intersect with later deletion interval", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ { sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)}, @@ -1243,7 +1362,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "three full mixed chunks", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, { sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, @@ -1275,7 +1394,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "three full mixed chunks in different order", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ { sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, @@ -1307,7 +1426,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "three full mixed chunks in different order intersect with deletion interval", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ { sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, @@ -1338,7 +1457,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "three full mixed chunks overlapping", - chks: [][]chunks.Sample{ + samples: [][]chunks.Sample{ { sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{12, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, @@ -1368,11 
+1487,237 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, expectedMinMaxTimes: []minMaxTimes{{7, 12}, {11, 16}, {10, 203}}, }, + { + // This case won't actually happen until OOO native histograms is implemented. + // Issue: https://github.com/prometheus/prometheus/issues/11220. + name: "int histogram iterables with counter resets", + samples: [][]chunks.Sample{ + { + sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil}, + sample{8, 0, tsdbutil.GenerateTestHistogram(9), nil}, + // Counter reset should be detected when chunks are created from the iterable. + sample{12, 0, tsdbutil.GenerateTestHistogram(5), nil}, + sample{15, 0, tsdbutil.GenerateTestHistogram(6), nil}, + sample{16, 0, tsdbutil.GenerateTestHistogram(7), nil}, + // Counter reset should be detected when chunks are created from the iterable. + sample{17, 0, tsdbutil.GenerateTestHistogram(5), nil}, + }, + { + sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil}, + sample{19, 0, tsdbutil.GenerateTestHistogram(7), nil}, + // Counter reset should be detected when chunks are created from the iterable. 
+ sample{20, 0, tsdbutil.GenerateTestHistogram(5), nil}, + sample{21, 0, tsdbutil.GenerateTestHistogram(6), nil}, + }, + }, + + expected: []chunks.Sample{ + sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil}, + sample{8, 0, tsdbutil.GenerateTestHistogram(9), nil}, + sample{12, 0, tsdbutil.GenerateTestHistogram(5), nil}, + sample{15, 0, tsdbutil.GenerateTestHistogram(6), nil}, + sample{16, 0, tsdbutil.GenerateTestHistogram(7), nil}, + sample{17, 0, tsdbutil.GenerateTestHistogram(5), nil}, + sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil}, + sample{19, 0, tsdbutil.GenerateTestHistogram(7), nil}, + sample{20, 0, tsdbutil.GenerateTestHistogram(5), nil}, + sample{21, 0, tsdbutil.GenerateTestHistogram(6), nil}, + }, + expectedChks: []chunks.Meta{ + assureChunkFromSamples(t, []chunks.Sample{ + sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil}, + sample{8, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(9)), nil}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{12, 0, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(5)), nil}, + sample{15, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil}, + sample{16, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(7)), nil}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{17, 0, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(5)), nil}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil}, + sample{19, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(7)), nil}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{20, 0, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(5)), nil}, + sample{21, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil}, + }), + }, + expectedMinMaxTimes: []minMaxTimes{ + {7, 8}, + {12, 16}, + {17, 17}, + {18, 19}, + {20, 21}, + }, + + // Skipping 
chunk test - can't create a single chunk for each + // sample slice since there are counter resets in the middle of + // the slices. + skipChunkTest: true, + }, + { + // This case won't actually happen until OOO native histograms is implemented. + // Issue: https://github.com/prometheus/prometheus/issues/11220. + name: "float histogram iterables with counter resets", + samples: [][]chunks.Sample{ + { + sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}, + sample{8, 0, nil, tsdbutil.GenerateTestFloatHistogram(9)}, + // Counter reset should be detected when chunks are created from the iterable. + sample{12, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)}, + sample{15, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, + sample{16, 0, nil, tsdbutil.GenerateTestFloatHistogram(7)}, + // Counter reset should be detected when chunks are created from the iterable. + sample{17, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)}, + }, + { + sample{18, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, + sample{19, 0, nil, tsdbutil.GenerateTestFloatHistogram(7)}, + // Counter reset should be detected when chunks are created from the iterable. 
+ sample{20, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)}, + sample{21, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, + }, + }, + + expected: []chunks.Sample{ + sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}, + sample{8, 0, nil, tsdbutil.GenerateTestFloatHistogram(9)}, + sample{12, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)}, + sample{15, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, + sample{16, 0, nil, tsdbutil.GenerateTestFloatHistogram(7)}, + sample{17, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)}, + sample{18, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, + sample{19, 0, nil, tsdbutil.GenerateTestFloatHistogram(7)}, + sample{20, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)}, + sample{21, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, + }, + expectedChks: []chunks.Meta{ + assureChunkFromSamples(t, []chunks.Sample{ + sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}, + sample{8, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(9))}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{12, 0, nil, tsdbutil.SetFloatHistogramCounterReset(tsdbutil.GenerateTestFloatHistogram(5))}, + sample{15, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))}, + sample{16, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(7))}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{17, 0, nil, tsdbutil.SetFloatHistogramCounterReset(tsdbutil.GenerateTestFloatHistogram(5))}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{18, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, + sample{19, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(7))}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{20, 0, nil, tsdbutil.SetFloatHistogramCounterReset(tsdbutil.GenerateTestFloatHistogram(5))}, + sample{21, 0, nil, 
tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))}, + }), + }, + expectedMinMaxTimes: []minMaxTimes{ + {7, 8}, + {12, 16}, + {17, 17}, + {18, 19}, + {20, 21}, + }, + + // Skipping chunk test - can't create a single chunk for each + // sample slice since there are counter resets in the middle of + // the slices. + skipChunkTest: true, + }, + { + // This case won't actually happen until OOO native histograms is implemented. + // Issue: https://github.com/prometheus/prometheus/issues/11220. + name: "iterables with mixed encodings and counter resets", + samples: [][]chunks.Sample{ + { + sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil}, + sample{8, 0, tsdbutil.GenerateTestHistogram(9), nil}, + sample{9, 0, nil, tsdbutil.GenerateTestFloatHistogram(10)}, + sample{10, 0, nil, tsdbutil.GenerateTestFloatHistogram(11)}, + sample{11, 0, nil, tsdbutil.GenerateTestFloatHistogram(12)}, + sample{12, 13, nil, nil}, + sample{13, 14, nil, nil}, + sample{14, 0, tsdbutil.GenerateTestHistogram(8), nil}, + // Counter reset should be detected when chunks are created from the iterable. 
+ sample{15, 0, tsdbutil.GenerateTestHistogram(7), nil}, + }, + { + sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil}, + sample{19, 45, nil, nil}, + }, + }, + + expected: []chunks.Sample{ + sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil}, + sample{8, 0, tsdbutil.GenerateTestHistogram(9), nil}, + sample{9, 0, nil, tsdbutil.GenerateTestFloatHistogram(10)}, + sample{10, 0, nil, tsdbutil.GenerateTestFloatHistogram(11)}, + sample{11, 0, nil, tsdbutil.GenerateTestFloatHistogram(12)}, + sample{12, 13, nil, nil}, + sample{13, 14, nil, nil}, + sample{14, 0, tsdbutil.GenerateTestHistogram(8), nil}, + sample{15, 0, tsdbutil.GenerateTestHistogram(7), nil}, + sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil}, + sample{19, 45, nil, nil}, + }, + expectedChks: []chunks.Meta{ + assureChunkFromSamples(t, []chunks.Sample{ + sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil}, + sample{8, 0, tsdbutil.GenerateTestHistogram(9), nil}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{9, 0, nil, tsdbutil.GenerateTestFloatHistogram(10)}, + sample{10, 0, nil, tsdbutil.GenerateTestFloatHistogram(11)}, + sample{11, 0, nil, tsdbutil.GenerateTestFloatHistogram(12)}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{12, 13, nil, nil}, + sample{13, 14, nil, nil}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{14, 0, tsdbutil.GenerateTestHistogram(8), nil}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{15, 0, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(7)), nil}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil}, + }), + assureChunkFromSamples(t, []chunks.Sample{ + sample{19, 45, nil, nil}, + }), + }, + expectedMinMaxTimes: []minMaxTimes{ + {7, 8}, + {9, 11}, + {12, 13}, + {14, 14}, + {15, 15}, + {18, 18}, + {19, 19}, + }, + + skipChunkTest: true, + }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { t.Run("sample", func(t *testing.T) { - f, 
chkMetas := createFakeReaderAndNotPopulatedChunks(tc.chks...) + var f *fakeChunksReader + var chkMetas []chunks.Meta + // If the test case wants to skip the chunks test, it probably + // means you can't create valid chunks from sample slices, + // therefore create iterables over the samples instead. + if tc.skipChunkTest { + f, chkMetas = createFakeReaderAndIterables(tc.samples...) + } else { + f, chkMetas = createFakeReaderAndNotPopulatedChunks(tc.samples...) + } it := &populateWithDelSeriesIterator{} it.reset(ulid.ULID{}, f, chkMetas, tc.intervals) @@ -1393,7 +1738,35 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { require.Equal(t, tc.expected, r) }) t.Run("chunk", func(t *testing.T) { - f, chkMetas := createFakeReaderAndNotPopulatedChunks(tc.chks...) + if tc.skipChunkTest { + t.Skip() + } + f, chkMetas := createFakeReaderAndNotPopulatedChunks(tc.samples...) + it := &populateWithDelChunkSeriesIterator{} + it.reset(ulid.ULID{}, f, chkMetas, tc.intervals) + + if tc.seek != 0 { + // Chunk iterator does not have Seek method. + return + } + expandedResult, err := storage.ExpandChunks(it) + require.NoError(t, err) + + // We don't care about ref IDs for comparison, only chunk's samples matters. + rmChunkRefs(expandedResult) + rmChunkRefs(tc.expectedChks) + require.Equal(t, tc.expectedChks, expandedResult) + + for i, meta := range expandedResult { + require.Equal(t, tc.expectedMinMaxTimes[i].minTime, meta.MinTime) + require.Equal(t, tc.expectedMinMaxTimes[i].maxTime, meta.MaxTime) + } + }) + t.Run("iterables", func(t *testing.T) { + if tc.skipIterableTest { + t.Skip() + } + f, chkMetas := createFakeReaderAndIterables(tc.samples...) 
it := &populateWithDelChunkSeriesIterator{} it.reset(ulid.ULID{}, f, chkMetas, tc.intervals) @@ -1686,13 +2059,13 @@ func BenchmarkMergedSeriesSet(b *testing.B) { type mockChunkReader map[chunks.ChunkRef]chunkenc.Chunk -func (cr mockChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) { +func (cr mockChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) { chk, ok := cr[meta.Ref] if ok { - return chk, nil + return chk, nil, nil } - return nil, errors.New("Chunk with ref not found") + return nil, nil, errors.New("Chunk with ref not found") } func (cr mockChunkReader) Close() error { @@ -3020,7 +3393,7 @@ func TestBlockBaseSeriesSet(t *testing.T) { idx := tc.expIdxs[i] require.Equal(t, tc.series[idx].lset, bcs.curr.labels) - require.Equal(t, tc.series[idx].chunks, si.chks) + require.Equal(t, tc.series[idx].chunks, si.metas) i++ } diff --git a/tsdb/tsdbutil/histogram.go b/tsdb/tsdbutil/histogram.go index 0327815c4..0847f81a8 100644 --- a/tsdb/tsdbutil/histogram.go +++ b/tsdb/tsdbutil/histogram.go @@ -116,7 +116,17 @@ func SetHistogramNotCounterReset(h *histogram.Histogram) *histogram.Histogram { return h } +func SetHistogramCounterReset(h *histogram.Histogram) *histogram.Histogram { + h.CounterResetHint = histogram.CounterReset + return h +} + func SetFloatHistogramNotCounterReset(h *histogram.FloatHistogram) *histogram.FloatHistogram { h.CounterResetHint = histogram.NotCounterReset return h } + +func SetFloatHistogramCounterReset(h *histogram.FloatHistogram) *histogram.FloatHistogram { + h.CounterResetHint = histogram.CounterReset + return h +} From 1b92fb10de8ebf118f54d70e06c2cde4701a1ae3 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 28 Nov 2023 10:52:27 +0000 Subject: [PATCH 614/632] Improvements to PR template (#13198) I like a bit more of a hint what the PR is supposed to achieve. Also, I took out a suggestion not to write tests. We can decide case-by-case if it's ok not to add a test; no need to encourage it. 
Signed-off-by: Bryan Boreham --- .github/PULL_REQUEST_TEMPLATE.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 95df72dd0..cf90177b1 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,4 +1,8 @@