From 4ce26288e06ef02c3915baa14e0046445391b406 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 4 Jun 2024 13:54:17 +0100 Subject: [PATCH 01/44] [ENHANCEMENT] HTTP API: Add url to errors logged while sending response Give more clues when troubleshooting. Signed-off-by: Bryan Boreham --- web/api/v1/api.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index f0884926e1..b95ff25cf9 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1761,7 +1761,7 @@ func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface b, err := codec.Encode(resp) if err != nil { - level.Error(api.logger).Log("msg", "error marshaling response", "err", err) + level.Error(api.logger).Log("msg", "error marshaling response", "url", req.URL, "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -1769,7 +1769,7 @@ func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface w.Header().Set("Content-Type", codec.ContentType().String()) w.WriteHeader(http.StatusOK) if n, err := w.Write(b); err != nil { - level.Error(api.logger).Log("msg", "error writing response", "bytesWritten", n, "err", err) + level.Error(api.logger).Log("msg", "error writing response", "url", req.URL, "bytesWritten", n, "err", err) } } From 8397c7bc48bad75a7ab34adb1989dde46fa2952a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Thu, 6 Jun 2024 09:56:36 +0200 Subject: [PATCH 02/44] Version bump to v2.53.0-rc.0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- CHANGELOG.md | 2 ++ VERSION | 2 +- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/package.json | 2 +- web/ui/react-app/package.json | 4 ++-- 7 files changed, 16 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6c2c9ae31c..511fa07468 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ ## unreleased +## 2.53.0-rc.0 / 2024-06-06 + This release changes the default for GOGC, the Go runtime control for the trade-off between excess memory use and CPU usage. We have found that Prometheus operates with minimal additional CPU usage, but greatly reduced memory by adjusting the upstream Go default from 100 to 50. * [CHANGE] Rules: Execute 1 query instead of N (where N is the number of alerts within alert rule) when restoring alerts. 
#13980 diff --git a/VERSION b/VERSION index e7a1fa2a8c..ae392bf33c 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.52.1 +2.53.0-rc.0 diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index f3f48c95e5..152abc8c7b 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.52.1", + "version": "0.53.0-rc.0", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.52.1", + "@prometheus-io/lezer-promql": "0.53.0-rc.0", "lru-cache": "^7.18.3" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index fa3be21d73..93486b8dec 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.52.1", + "version": "0.53.0-rc.0", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 139a24fc6e..d002109ddd 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.52.1", + "version": "0.53.0-rc.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.52.1", + "version": "0.53.0-rc.0", "workspaces": [ "react-app", "module/*" @@ -30,10 +30,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.52.1", + "version": "0.53.0-rc.0", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.52.1", + "@prometheus-io/lezer-promql": "0.53.0-rc.0", "lru-cache": "^7.18.3" }, "devDependencies": { @@ -69,7 +69,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.52.1", + "version": "0.53.0-rc.0", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.5.1", @@ -19233,7 +19233,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.52.1", + "version": "0.53.0-rc.0", "dependencies": { "@codemirror/autocomplete": "^6.11.1", "@codemirror/commands": "^6.3.2", @@ -19251,7 +19251,7 @@ "@lezer/lr": "^1.3.14", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.52.1", + "@prometheus-io/codemirror-promql": "0.53.0-rc.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.2", diff --git a/web/ui/package.json b/web/ui/package.json index 407b97f6c4..4c9ce03e4e 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -28,5 +28,5 @@ "ts-jest": "^29.1.1", "typescript": "^4.9.5" }, - "version": "0.52.1" + "version": "0.53.0-rc.0" } diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 4bc667b0f0..d21cf3db5b 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.52.1", + "version": "0.53.0-rc.0", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.11.1", @@ -19,7 +19,7 @@ "@lezer/lr": "^1.3.14", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.52.1", + 
"@prometheus-io/codemirror-promql": "0.53.0-rc.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.2", From b59034ec3190462abc921b93d5822943cecbb386 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Thu, 6 Jun 2024 12:29:34 +0200 Subject: [PATCH 03/44] Ammend changelog with missing user impact entries MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- CHANGELOG.md | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 511fa07468..4b3f1980b0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,14 +6,21 @@ This release changes the default for GOGC, the Go runtime control for the trade-off between excess memory use and CPU usage. We have found that Prometheus operates with minimal additional CPU usage, but greatly reduced memory by adjusting the upstream Go default from 100 to 50. -* [CHANGE] Rules: Execute 1 query instead of N (where N is the number of alerts within alert rule) when restoring alerts. #13980 +* [CHANGE] Rules: Execute 1 query instead of N (where N is the number of alerts within alert rule) when restoring alerts. #13980 #14048 * [CHANGE] Runtime: Change GOGC threshold from 100 to 50 #14176 -* [FEATURE] Rules: Add new option `query_offset` for each rule group via rule group configuration file and `rule_query_offset` as part of the global configuration to have more resilience for remote write delays. #14061 -* [ENHANCEMENT] Rules: Add `rule_group_last_restore_duration_seconds` to measure the time it takes to restore a rule group. #13974 +* [FEATURE] Rules: Add new option `query_offset` for each rule group via rule group configuration file and `rule_query_offset` as part of the global configuration to have more resilience for remote write delays. #14061 #14216 +* [ENHANCEMENT] Rules: Add `rule_group_last_restore_duration_seconds` metric to measure the time it takes to restore a rule group. #13974 * [ENHANCEMENT] OTLP: Improve remote write format translation performance by using label set hashes for metric identifiers instead of string based ones. #14006 #13991 * [ENHANCEMENT] TSDB: Optimize querying with regexp matchers. #13620 * [BUGFIX] OTLP: Don't generate target_info unless at least one identifying label is defined. #13991 * [BUGFIX] OTLP: Don't generate target_info unless there are metrics. #13991 +* [BUGFIX] Native histograms: dDcouple native histogram ingestions and protobuf parsing that lead to errors when using created timestamp feature. #13987 +* [BUGFIX] Scaleway SD: Use the instance's public IP if no private IP is available as the `__address__` meta label. #13941 +* [BUGFIX] Query logger: Do not leak file descriptors on error. #13948 +* [BUGFIX] TSDB: Let queries with heavy regex matches be cancelled and not use up the CPU. #14096 #14103 #14118 #14199 +* [BUGFIX] UI: Allow users to opt-out of the multi-cluster setup for the main Prometheus dashboard, in environments where it isn't applicable. #14062 +* [BUGFIX] API: Do not warn if result count is equal to the limit, only when exceeding the limit for the series, label-names and label-values APIs. #14116 +* [BUGFIX] TSDB: Fix head stats and hooks when replaying a corrupted snapshot. 
#14079 ## 2.52.1 / 2024-05-29 From 73f74d301e5410880f8f977afbef754e33fae9e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Thu, 6 Jun 2024 12:29:49 +0200 Subject: [PATCH 04/44] Clarify action to take with regards to the changelog MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- RELEASE.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/RELEASE.md b/RELEASE.md index f313c4172d..f9a42be6b8 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -149,6 +149,8 @@ Changes for a patch release or release candidate should be merged into the previ Bump the version in the `VERSION` file and update `CHANGELOG.md`. Do this in a proper PR pointing to the release branch as this gives others the opportunity to chime in on the release in general and on the addition to the changelog in particular. For a release candidate, append something like `-rc.0` to the version (with the corresponding changes to the tag name, the release name etc.). +When updating the `CHANGELOG.md` look at all PRs included in the release since the last release and verify if they need a changelog entry. + Note that `CHANGELOG.md` should only document changes relevant to users of Prometheus, including external API changes, performance improvements, and new features. Do not document changes of internal interfaces, code refactorings and clean-ups, changes to the build process, etc. People interested in these are asked to refer to the git history. For release candidates still update `CHANGELOG.md`, but when you cut the final release later, merge all the changes from the pre-releases into the one final update. From c8de725abaf6058787b389ab4260f34cbeb21069 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Thu, 6 Jun 2024 12:31:03 +0200 Subject: [PATCH 05/44] Fix typo in changelog MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b3f1980b0..225fb70480 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,7 @@ This release changes the default for GOGC, the Go runtime control for the trade- * [ENHANCEMENT] TSDB: Optimize querying with regexp matchers. #13620 * [BUGFIX] OTLP: Don't generate target_info unless at least one identifying label is defined. #13991 * [BUGFIX] OTLP: Don't generate target_info unless there are metrics. #13991 -* [BUGFIX] Native histograms: dDcouple native histogram ingestions and protobuf parsing that lead to errors when using created timestamp feature. #13987 +* [BUGFIX] Native histograms: Decouple native histogram ingestions and protobuf parsing that lead to errors when using created timestamp feature. #13987 * [BUGFIX] Scaleway SD: Use the instance's public IP if no private IP is available as the `__address__` meta label. #13941 * [BUGFIX] Query logger: Do not leak file descriptors on error. #13948 * [BUGFIX] TSDB: Let queries with heavy regex matches be cancelled and not use up the CPU. #14096 #14103 #14118 #14199 From c7fdfe800462d0a4f3eecd12d67dcd50065fb3c9 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 6 Jun 2024 17:47:38 +0200 Subject: [PATCH 06/44] promql: Add tests for histogram counter reset only in bucket This also exercises the "fast path" (only decoding count and sum), i.e. where the counter reset isn't visible at all in the decoded data. 
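The scenario under test: between two samples the histogram's total count and sum keep increasing while one bucket's count drops, so the counter reset is only detectable once the buckets themselves are decoded. A minimal, self-contained Go sketch of that distinction, using hypothetical types rather than Prometheus's actual histogram code, with the same numbers as the test data below:

// Illustrative sketch only: toy types, not the Prometheus histogram implementation.
package main

import "fmt"

type hist struct {
	count   float64
	sum     float64
	buckets []float64
}

// resetInTotals reports a reset using only count and sum (the "fast path" view).
func resetInTotals(prev, cur hist) bool {
	return cur.count < prev.count || cur.sum < prev.sum
}

// resetInBuckets additionally checks every bucket, which is what full decoding sees.
func resetInBuckets(prev, cur hist) bool {
	for i := range prev.buckets {
		if i < len(cur.buckets) && cur.buckets[i] < prev.buckets[i] {
			return true
		}
	}
	return false
}

func main() {
	prev := hist{count: 4, sum: 5, buckets: []float64{1, 2, 1}}
	cur := hist{count: 5, sum: 6, buckets: []float64{1, 1, 3}} // bucket 1 dropped from 2 to 1
	fmt.Println(resetInTotals(prev, cur))  // false: the totals only went up
	fmt.Println(resetInBuckets(prev, cur)) // true: the reset shows up in a bucket
}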
Signed-off-by: beorn7 --- .../promqltest/testdata/native_histograms.test | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 37818e4f88..f79517023c 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -714,3 +714,20 @@ eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_4) eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_4) {} 1 + +clear + +# Counter reset only noticeable in a single bucket. +load 5m + reset_in_bucket {{schema:0 count:4 sum:5 buckets:[1 2 1]}} {{schema:0 count:5 sum:6 buckets:[1 1 3]}} {{schema:0 count:6 sum:7 buckets:[1 2 3]}} + +eval instant at 10m increase(reset_in_bucket[15m]) + {} {{count:9 sum:10.5 buckets:[1.5 3 4.5]}} + +# The following two test the "fast path" where only sum and count is decoded. +eval instant at 10m histogram_count(increase(reset_in_bucket[15m])) + {} 9 + +eval instant at 10m histogram_sum(increase(reset_in_bucket[15m])) + {} 10.5 + From 3feefd903b2265373272d3ed443c76571ea45008 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Thu, 6 Jun 2024 17:54:04 +0200 Subject: [PATCH 07/44] Update changelog from review comments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- CHANGELOG.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 225fb70480..754386bd00 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,13 +12,11 @@ This release changes the default for GOGC, the Go runtime control for the trade- * [ENHANCEMENT] Rules: Add `rule_group_last_restore_duration_seconds` metric to measure the time it takes to restore a rule group. #13974 * [ENHANCEMENT] OTLP: Improve remote write format translation performance by using label set hashes for metric identifiers instead of string based ones. #14006 #13991 * [ENHANCEMENT] TSDB: Optimize querying with regexp matchers. #13620 -* [BUGFIX] OTLP: Don't generate target_info unless at least one identifying label is defined. #13991 -* [BUGFIX] OTLP: Don't generate target_info unless there are metrics. #13991 -* [BUGFIX] Native histograms: Decouple native histogram ingestions and protobuf parsing that lead to errors when using created timestamp feature. #13987 +* [BUGFIX] OTLP: Don't generate target_info unless there are metrics and at least one identifying label is defined. #13991 +* [BUGFIX] Scrape: Do no try to ingest native histograms when the native histograms feature is turned off. This happened when protobuf scrape was enabled by for example the created time feature. #13987 * [BUGFIX] Scaleway SD: Use the instance's public IP if no private IP is available as the `__address__` meta label. #13941 * [BUGFIX] Query logger: Do not leak file descriptors on error. #13948 * [BUGFIX] TSDB: Let queries with heavy regex matches be cancelled and not use up the CPU. #14096 #14103 #14118 #14199 -* [BUGFIX] UI: Allow users to opt-out of the multi-cluster setup for the main Prometheus dashboard, in environments where it isn't applicable. #14062 * [BUGFIX] API: Do not warn if result count is equal to the limit, only when exceeding the limit for the series, label-names and label-values APIs. #14116 * [BUGFIX] TSDB: Fix head stats and hooks when replaying a corrupted snapshot. 
#14079 From 1d2f2cb43da031f91947682a90c02062665a386f Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Thu, 6 Jun 2024 19:47:36 +0200 Subject: [PATCH 08/44] Fix Group.Equals() to take in account the new queryOffset too (#14273) Signed-off-by: Marco Pracucci --- rules/group.go | 4 ++ rules/group_test.go | 98 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 102 insertions(+) create mode 100644 rules/group_test.go diff --git a/rules/group.go b/rules/group.go index 9ae89789d0..c0ad18c187 100644 --- a/rules/group.go +++ b/rules/group.go @@ -793,6 +793,10 @@ func (g *Group) Equals(ng *Group) bool { return false } + if ((g.queryOffset == nil) != (ng.queryOffset == nil)) || (g.queryOffset != nil && ng.queryOffset != nil && *g.queryOffset != *ng.queryOffset) { + return false + } + if len(g.rules) != len(ng.rules) { return false } diff --git a/rules/group_test.go b/rules/group_test.go new file mode 100644 index 0000000000..ff1ef3d6c1 --- /dev/null +++ b/rules/group_test.go @@ -0,0 +1,98 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rules + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestGroup_Equals(t *testing.T) { + tests := map[string]struct { + first *Group + second *Group + expected bool + }{ + "no query offset set on both groups": { + first: &Group{ + name: "group-1", + file: "file-1", + interval: time.Minute, + }, + second: &Group{ + name: "group-1", + file: "file-1", + interval: time.Minute, + }, + expected: true, + }, + "query offset set only on the first group": { + first: &Group{ + name: "group-1", + file: "file-1", + interval: time.Minute, + queryOffset: pointerOf[time.Duration](time.Minute), + }, + second: &Group{ + name: "group-1", + file: "file-1", + interval: time.Minute, + }, + expected: false, + }, + "query offset set on both groups to the same value": { + first: &Group{ + name: "group-1", + file: "file-1", + interval: time.Minute, + queryOffset: pointerOf[time.Duration](time.Minute), + }, + second: &Group{ + name: "group-1", + file: "file-1", + interval: time.Minute, + queryOffset: pointerOf[time.Duration](time.Minute), + }, + expected: true, + }, + "query offset set on both groups to different value": { + first: &Group{ + name: "group-1", + file: "file-1", + interval: time.Minute, + queryOffset: pointerOf[time.Duration](time.Minute), + }, + second: &Group{ + name: "group-1", + file: "file-1", + interval: time.Minute, + queryOffset: pointerOf[time.Duration](2 * time.Minute), + }, + expected: false, + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + require.Equal(t, testData.expected, testData.first.Equals(testData.second)) + require.Equal(t, testData.expected, testData.second.Equals(testData.first)) + }) + } +} + +func pointerOf[T any](value T) *T { + return &value +} From dd4400146521c996239da57d2a225a608e3915cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Fri, 7 Jun 2024 10:21:27 +0200 
Subject: [PATCH 09/44] Update changelog due to pr 14273 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 754386bd00..0bc3abd71e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,7 +8,7 @@ This release changes the default for GOGC, the Go runtime control for the trade- * [CHANGE] Rules: Execute 1 query instead of N (where N is the number of alerts within alert rule) when restoring alerts. #13980 #14048 * [CHANGE] Runtime: Change GOGC threshold from 100 to 50 #14176 -* [FEATURE] Rules: Add new option `query_offset` for each rule group via rule group configuration file and `rule_query_offset` as part of the global configuration to have more resilience for remote write delays. #14061 #14216 +* [FEATURE] Rules: Add new option `query_offset` for each rule group via rule group configuration file and `rule_query_offset` as part of the global configuration to have more resilience for remote write delays. #14061 #14216 #14273 * [ENHANCEMENT] Rules: Add `rule_group_last_restore_duration_seconds` metric to measure the time it takes to restore a rule group. #13974 * [ENHANCEMENT] OTLP: Improve remote write format translation performance by using label set hashes for metric identifiers instead of string based ones. #14006 #13991 * [ENHANCEMENT] TSDB: Optimize querying with regexp matchers. #13620 From d0d361da53eb825fc2207fa483b2672c0a6da2f8 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Fri, 7 Jun 2024 16:09:53 +0200 Subject: [PATCH 10/44] headIndexReader.LabelNamesFor: skip not found series It's quite common during the compaction cycle to hold series IDs for series that aren't in the TSDB head anymore. We shouldn't fail if that happens, as the caller has no way to figure out which one of the IDs doesn't exist. Fixes https://github.com/prometheus/prometheus/issues/14278 Signed-off-by: Oleg Zaytsev --- tsdb/head_read.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tsdb/head_read.go b/tsdb/head_read.go index df15abcd50..c53e10956b 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -277,7 +277,9 @@ func (h *headIndexReader) LabelNamesFor(ctx context.Context, ids ...storage.Seri } memSeries := h.head.series.getByID(chunks.HeadSeriesRef(id)) if memSeries == nil { - return nil, storage.ErrNotFound + // Series not found, this happens during compaction, + // when series was garbage collected after the caller got the series IDs. + continue } memSeries.lset.Range(func(lbl labels.Label) { namesMap[lbl.Name] = struct{}{} From c92a5773b27a0ec07e3f523f57a1869f8532c634 Mon Sep 17 00:00:00 2001 From: parnavh Date: Sat, 8 Jun 2024 20:23:46 +0530 Subject: [PATCH 11/44] fix: broken link on github mobile Signed-off-by: parnavh --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 023619a781..cd14ed2ecb 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@

 [README.md hunk, HTML markup omitted: the links to prometheus.io in the logo heading and in the "Visit prometheus.io for the full documentation, examples and guides." paragraph are updated so they open correctly on GitHub mobile.]

From 2dc177d8afce72529213cc76bb1d62a125bdd60b Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Mon, 10 Jun 2024 14:23:22 +0200 Subject: [PATCH 12/44] `MemPostings.Delete()`: reduce locking/unlocking (#13286) * MemPostings: reduce locking/unlocking MemPostings.Delete is called from Head.gc(), i.e. it gets the IDs of the series that have churned. I'd assume that many label values aren't affected by that churn at all, so it doesn't make sense to touch the lock while checking them. Signed-off-by: Oleg Zaytsev --- tsdb/index/postings.go | 57 ++++++++++++++++------ tsdb/index/postings_test.go | 97 +++++++++++++++++++++++++++++++++++++ 2 files changed, 139 insertions(+), 15 deletions(-) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 159f6416e2..937d1287b8 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -289,41 +289,67 @@ func (p *MemPostings) EnsureOrder(numberOfConcurrentProcesses int) { // Delete removes all ids in the given map from the postings lists. func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}) { - var keys, vals []string + // We will take an optimistic read lock for the entire method, + // and only lock for writing when we actually find something to delete. + // + // Each SeriesRef can appear in several Postings. + // To change each one, we need to know the label name and value that it is indexed under. + // We iterate over all label names, then for each name all values, + // and look for individual series to be deleted. + p.mtx.RLock() + defer p.mtx.RUnlock() // Collect all keys relevant for deletion once. New keys added afterwards // can by definition not be affected by any of the given deletes. - p.mtx.RLock() + keys := make([]string, 0, len(p.m)) + maxVals := 0 for n := range p.m { keys = append(keys, n) + if len(p.m[n]) > maxVals { + maxVals = len(p.m[n]) + } } - p.mtx.RUnlock() + vals := make([]string, 0, maxVals) for _, n := range keys { - p.mtx.RLock() + // Copy the values and iterate the copy: if we unlock in the loop below, + // another goroutine might modify the map while we are part-way through it. vals = vals[:0] for v := range p.m[n] { vals = append(vals, v) } - p.mtx.RUnlock() // For each posting we first analyse whether the postings list is affected by the deletes. - // If yes, we actually reallocate a new postings list. - for _, l := range vals { - // Only lock for processing one postings list so we don't block reads for too long. - p.mtx.Lock() - + // If no, we remove the label value from the vals list. + // This way we only need to Lock once later. + for i := 0; i < len(vals); { found := false - for _, id := range p.m[n][l] { + refs := p.m[n][vals[i]] + for _, id := range refs { if _, ok := deleted[id]; ok { + i++ found = true break } } + if !found { - p.mtx.Unlock() - continue + // Didn't match, bring the last value to this position, make the slice shorter and check again. + // The order of the slice doesn't matter as it comes from a map iteration. + vals[i], vals = vals[len(vals)-1], vals[:len(vals)-1] } + } + + // If no label values have deleted ids, just continue. + if len(vals) == 0 { + continue + } + + // The only vals left here are the ones that contain deleted ids. + // Now we take the write lock and remove the ids. 
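What follows is the only point where the method trades its read lock for the write lock. As a rough standalone sketch of that optimistic pattern (hypothetical names, not part of this patch): scan under RLock, upgrade to Lock only for entries that actually need mutating, and re-read shared state after the upgrade because other goroutines may have run in between.

// Illustrative sketch of the RLock-to-Lock upgrade pattern described in the commit message.
package main

import (
	"fmt"
	"sync"
)

// store is a toy stand-in for MemPostings: a map guarded by an RWMutex.
type store struct {
	mtx sync.RWMutex
	m   map[string][]int
}

// dropNegatives scans under the read lock and takes the write lock only for
// keys that actually contain a negative value.
func (s *store) dropNegatives() {
	s.mtx.RLock()
	defer s.mtx.RUnlock()

	// Snapshot the keys once; don't range over the live map across unlock windows.
	keys := make([]string, 0, len(s.m))
	for k := range s.m {
		keys = append(keys, k)
	}

	for _, k := range keys {
		dirty := false
		for _, v := range s.m[k] {
			if v < 0 {
				dirty = true
				break
			}
		}
		if !dirty {
			continue // nothing to remove, the write lock is never taken
		}
		// Trade the read lock for the write lock. Other goroutines may run in
		// this window, so re-read s.m[k] instead of reusing earlier state.
		s.mtx.RUnlock()
		s.mtx.Lock()
		kept := make([]int, 0, len(s.m[k]))
		for _, v := range s.m[k] {
			if v >= 0 {
				kept = append(kept, v)
			}
		}
		s.m[k] = kept
		s.mtx.Unlock()
		s.mtx.RLock()
	}
}

func main() {
	s := &store{m: map[string][]int{"a": {1, -2, 3}, "b": {4, 5}}}
	s.dropNegatives()
	fmt.Println(s.m) // map[a:[1 3] b:[4 5]]
}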
+ p.mtx.RUnlock() + p.mtx.Lock() + for _, l := range vals { repl := make([]storage.SeriesRef, 0, len(p.m[n][l])) for _, id := range p.m[n][l] { @@ -336,13 +362,14 @@ func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}) { } else { delete(p.m[n], l) } - p.mtx.Unlock() } - p.mtx.Lock() + + // Delete the key if we removed all values. if len(p.m[n]) == 0 { delete(p.m, n) } p.mtx.Unlock() + p.mtx.RLock() } } diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index 2cbc14ac64..562aef457e 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -23,6 +23,7 @@ import ( "sort" "strconv" "strings" + "sync" "testing" "github.com/grafana/regexp" @@ -1001,6 +1002,102 @@ func TestMemPostings_Delete(t *testing.T) { require.Empty(t, expanded, "expected empty postings, got %v", expanded) } +// BenchmarkMemPostings_Delete is quite heavy, so consider running it with +// -benchtime=10x or similar to get more stable and comparable results. +func BenchmarkMemPostings_Delete(b *testing.B) { + internedItoa := map[int]string{} + var mtx sync.RWMutex + itoa := func(i int) string { + mtx.RLock() + s, ok := internedItoa[i] + mtx.RUnlock() + if ok { + return s + } + mtx.Lock() + s = strconv.Itoa(i) + internedItoa[i] = s + mtx.Unlock() + return s + } + + const total = 1e6 + prepare := func() *MemPostings { + var ref storage.SeriesRef + next := func() storage.SeriesRef { + ref++ + return ref + } + + p := NewMemPostings() + nameValues := make([]string, 0, 100) + for i := 0; i < total; i++ { + nameValues = nameValues[:0] + + // A thousand labels like lbl_x_of_1000, each with total/1000 values + thousand := "lbl_" + itoa(i%1000) + "_of_1000" + nameValues = append(nameValues, thousand, itoa(i/1000)) + // A hundred labels like lbl_x_of_100, each with total/100 values. + hundred := "lbl_" + itoa(i%100) + "_of_100" + nameValues = append(nameValues, hundred, itoa(i/100)) + + if i < 100 { + ten := "lbl_" + itoa(i%10) + "_of_10" + nameValues = append(nameValues, ten, itoa(i%10)) + } + + p.Add(next(), labels.FromStrings(append(nameValues, "first", "a", "second", "a", "third", "a")...)) + } + return p + } + + for _, refs := range []int{1, 100, 10_000} { + b.Run(fmt.Sprintf("refs=%d", refs), func(b *testing.B) { + for _, reads := range []int{0, 1, 10} { + b.Run(fmt.Sprintf("readers=%d", reads), func(b *testing.B) { + if b.N > total/refs { + // Just to make sure that benchmark still makes sense. + panic("benchmark not prepared") + } + + p := prepare() + stop := make(chan struct{}) + wg := sync.WaitGroup{} + for i := 0; i < reads; i++ { + wg.Add(1) + go func(i int) { + lbl := "lbl_" + itoa(i) + "_of_100" + defer wg.Done() + for { + select { + case <-stop: + return + default: + // Get a random value of this label. 
+ p.Get(lbl, itoa(rand.Intn(10000))).Next() + } + } + }(i) + } + b.Cleanup(func() { + close(stop) + wg.Wait() + }) + + b.ResetTimer() + for n := 0; n < b.N; n++ { + deleted := map[storage.SeriesRef]struct{}{} + for i := 0; i < refs; i++ { + deleted[storage.SeriesRef(n*refs+i)] = struct{}{} + } + p.Delete(deleted) + } + }) + } + }) + } +} + func TestFindIntersectingPostings(t *testing.T) { t.Run("multiple intersections", func(t *testing.T) { p := NewListPostings([]storage.SeriesRef{10, 15, 20, 25, 30, 35, 40, 45, 50}) From 10a3c7220b66939d85c3c860e8c97e797bda9d46 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Mon, 10 Jun 2024 14:24:17 +0200 Subject: [PATCH 13/44] `MemPostings.PostingsForLabelMatching()`: don't hold the mutex while matching (#14286) * MemPostings.PostingsForLabelMatching: let mutex go This changes the `MemPostings.PostingsForLabelMatching` implementation to stop holding the read mutex while matching the label values. We've seen that this method can be slow when the matcher is expensive, that's why we even added a context expiration check. However, there are critical process that might be waiting on this mutex: writes (adding new series) and compaction (deleting the garbage-collected ones), so we should avoid holding it for a long period of time. Given that we've copied the values to a slice anyway, there's no need to hold the lock while matching. Signed-off-by: Oleg Zaytsev --- tsdb/index/postings.go | 74 ++++++++++++++++++++++++++----------- tsdb/index/postings_test.go | 22 +++++++++++ 2 files changed, 75 insertions(+), 21 deletions(-) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 937d1287b8..6b654f6b5b 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -425,16 +425,62 @@ func (p *MemPostings) addFor(id storage.SeriesRef, l labels.Label) { } func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string, match func(string) bool) Postings { - p.mtx.RLock() + // We'll copy the values into a slice and then match over that, + // this way we don't need to hold the mutex while we're matching, + // which can be slow (seconds) if the match function is a huge regex. + // Holding this lock prevents new series from being added (slows down the write path) + // and blocks the compaction process. + vals := p.labelValues(name) + for i, count := 0, 1; i < len(vals); count++ { + if count%checkContextEveryNIterations == 0 && ctx.Err() != nil { + return ErrPostings(ctx.Err()) + } - e := p.m[name] - if len(e) == 0 { - p.mtx.RUnlock() + if match(vals[i]) { + i++ + continue + } + + // Didn't match, bring the last value to this position, make the slice shorter and check again. + // The order of the slice doesn't matter as it comes from a map iteration. + vals[i], vals = vals[len(vals)-1], vals[:len(vals)-1] + } + + // If none matched (or this label had no values), no need to grab the lock again. + if len(vals) == 0 { return EmptyPostings() } - // Benchmarking shows that first copying the values into a slice and then matching over that is - // faster than matching over the map keys directly, at least on AMD64. + // Now `vals` only contains the values that matched, get their postings. + its := make([]Postings, 0, len(vals)) + p.mtx.RLock() + e := p.m[name] + for _, v := range vals { + if refs, ok := e[v]; ok { + // Some of the values may have been garbage-collected in the meantime this is fine, we'll just skip them. 
+ // If we didn't let the mutex go, we'd have these postings here, but they would be pointing nowhere + // because there would be a `MemPostings.Delete()` call waiting for the lock to delete these labels, + // because the series were deleted already. + its = append(its, NewListPostings(refs)) + } + } + // Let the mutex go before merging. + p.mtx.RUnlock() + + return Merge(ctx, its...) +} + +// labelValues returns a slice of label values for the given label name. +// It will take the read lock. +func (p *MemPostings) labelValues(name string) []string { + p.mtx.RLock() + defer p.mtx.RUnlock() + + e := p.m[name] + if len(e) == 0 { + return nil + } + vals := make([]string, 0, len(e)) for v, srs := range e { if len(srs) > 0 { @@ -442,21 +488,7 @@ func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string, } } - var its []Postings - count := 1 - for _, v := range vals { - if count%checkContextEveryNIterations == 0 && ctx.Err() != nil { - p.mtx.RUnlock() - return ErrPostings(ctx.Err()) - } - count++ - if match(v) { - its = append(its, NewListPostings(e[v])) - } - } - p.mtx.RUnlock() - - return Merge(ctx, its...) + return vals } // ExpandPostings returns the postings expanded as a slice. diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index 562aef457e..4f34cc47ea 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -1435,6 +1435,28 @@ func BenchmarkMemPostings_PostingsForLabelMatching(b *testing.B) { } } +func TestMemPostings_PostingsForLabelMatching(t *testing.T) { + mp := NewMemPostings() + mp.Add(1, labels.FromStrings("foo", "1")) + mp.Add(2, labels.FromStrings("foo", "2")) + mp.Add(3, labels.FromStrings("foo", "3")) + mp.Add(4, labels.FromStrings("foo", "4")) + + isEven := func(v string) bool { + iv, err := strconv.Atoi(v) + if err != nil { + panic(err) + } + return iv%2 == 0 + } + + p := mp.PostingsForLabelMatching(context.Background(), "foo", isEven) + require.NoError(t, p.Err()) + refs, err := ExpandPostings(p) + require.NoError(t, err) + require.Equal(t, []storage.SeriesRef{2, 4}, refs) +} + func TestMemPostings_PostingsForLabelMatchingHonorsContextCancel(t *testing.T) { memP := NewMemPostings() seriesCount := 10 * checkContextEveryNIterations From 19fd5212c37aba8fb4b0642f9acfdc8c7ccc7681 Mon Sep 17 00:00:00 2001 From: Rens Groothuijsen Date: Mon, 10 Jun 2024 20:16:02 +0200 Subject: [PATCH 14/44] docs: clarify default Docker command line parameters (#14194) * docs: clarify default Docker command line parameters Signed-off-by: Rens Groothuijsen * docs: move Docker command line parameters section and refer to Dockerfile Signed-off-by: Rens Groothuijsen * Add link to Dockerfile in documentation Co-authored-by: Ayoub Mrini Signed-off-by: Rens Groothuijsen --------- Signed-off-by: Rens Groothuijsen Co-authored-by: Ayoub Mrini --- docs/installation.md | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/docs/installation.md b/docs/installation.md index 28f64c0f95..c8e359e780 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -31,11 +31,19 @@ production deployments it is highly recommended to use a [named volume](https://docs.docker.com/storage/volumes/) to ease managing the data on Prometheus upgrades. -To provide your own configuration, there are several options. Here are -two examples. 
+### Setting command line parameters + +The Docker image is started with a number of default command line parameters, which +can be found in the [Dockerfile](https://github.com/prometheus/prometheus/blob/main/Dockerfile) (adjust the link to correspond with the version in use). + +If you want to add extra command line parameters to the `docker run` command, +you will need to re-add these yourself as they will be overwritten. ### Volumes & bind-mount +To provide your own configuration, there are several options. Here are +two examples. + Bind-mount your `prometheus.yml` from the host by running: ```bash From 5a5a6f08ef1437e4a0493d9275cb7181d92a1450 Mon Sep 17 00:00:00 2001 From: Sergey <83376337+freak12techno@users.noreply.github.com> Date: Mon, 10 Jun 2024 21:40:11 +0300 Subject: [PATCH 15/44] chore: use HumanizeDuration from prometheus/common (#14202) * chore: use HumanizeDuration from prometheus/common Signed-off-by: Sergey * chore: fixed linting Signed-off-by: Sergey * chore: review fixes --------- Signed-off-by: Sergey --- template/template.go | 48 +++----------------------------------------- 1 file changed, 3 insertions(+), 45 deletions(-) diff --git a/template/template.go b/template/template.go index 43772805cd..dbe1607cfa 100644 --- a/template/template.go +++ b/template/template.go @@ -32,6 +32,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + common_templates "github.com/prometheus/common/helpers/templates" + "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/util/strutil" ) @@ -263,51 +265,7 @@ func NewTemplateExpander( } return fmt.Sprintf("%.4g%s", v, prefix), nil }, - "humanizeDuration": func(i interface{}) (string, error) { - v, err := convertToFloat(i) - if err != nil { - return "", err - } - if math.IsNaN(v) || math.IsInf(v, 0) { - return fmt.Sprintf("%.4g", v), nil - } - if v == 0 { - return fmt.Sprintf("%.4gs", v), nil - } - if math.Abs(v) >= 1 { - sign := "" - if v < 0 { - sign = "-" - v = -v - } - duration := int64(v) - seconds := duration % 60 - minutes := (duration / 60) % 60 - hours := (duration / 60 / 60) % 24 - days := duration / 60 / 60 / 24 - // For days to minutes, we display seconds as an integer. - if days != 0 { - return fmt.Sprintf("%s%dd %dh %dm %ds", sign, days, hours, minutes, seconds), nil - } - if hours != 0 { - return fmt.Sprintf("%s%dh %dm %ds", sign, hours, minutes, seconds), nil - } - if minutes != 0 { - return fmt.Sprintf("%s%dm %ds", sign, minutes, seconds), nil - } - // For seconds, we display 4 significant digits. 
- return fmt.Sprintf("%s%.4gs", sign, v), nil - } - prefix := "" - for _, p := range []string{"m", "u", "n", "p", "f", "a", "z", "y"} { - if math.Abs(v) >= 1 { - break - } - prefix = p - v *= 1000 - } - return fmt.Sprintf("%.4g%ss", v, prefix), nil - }, + "humanizeDuration": common_templates.HumanizeDuration, "humanizePercentage": func(i interface{}) (string, error) { v, err := convertToFloat(i) if err != nil { From 39902ba694628fec9fefc9be681e5dfa9f96b26f Mon Sep 17 00:00:00 2001 From: Ranveer Avhad <46259310+Ranveer777@users.noreply.github.com> Date: Tue, 11 Jun 2024 04:01:41 +0530 Subject: [PATCH 16/44] [BUGFIX] FastRegexpMatcher: do Unicode normalization as part of case-insensitive comparison (#14170) * Converted string to standarized form * Added golang.org/x/text in Go dependencies * Added test cases for FastRegexMatcher * Added benchmark for toNormalizedLower Signed-off-by: RA --- go.mod | 2 +- model/labels/regexp.go | 44 ++++++++++++++++++++++++-- model/labels/regexp_test.go | 63 ++++++++++++++++++++++++++++++++++++- 3 files changed, 105 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 8caf80727b..ac8b4f469d 100644 --- a/go.mod +++ b/go.mod @@ -77,6 +77,7 @@ require ( golang.org/x/oauth2 v0.21.0 golang.org/x/sync v0.7.0 golang.org/x/sys v0.21.0 + golang.org/x/text v0.16.0 golang.org/x/time v0.5.0 golang.org/x/tools v0.22.0 google.golang.org/api v0.183.0 @@ -188,7 +189,6 @@ require ( golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/mod v0.18.0 // indirect golang.org/x/term v0.21.0 // indirect - golang.org/x/text v0.16.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/model/labels/regexp.go b/model/labels/regexp.go index f228d7ff1f..1f3f15eb07 100644 --- a/model/labels/regexp.go +++ b/model/labels/regexp.go @@ -16,10 +16,12 @@ package labels import ( "slices" "strings" + "unicode" "unicode/utf8" "github.com/grafana/regexp" "github.com/grafana/regexp/syntax" + "golang.org/x/text/unicode/norm" ) const ( @@ -766,7 +768,7 @@ type equalMultiStringMapMatcher struct { func (m *equalMultiStringMapMatcher) add(s string) { if !m.caseSensitive { - s = strings.ToLower(s) + s = toNormalisedLower(s) } m.values[s] = struct{}{} @@ -786,13 +788,51 @@ func (m *equalMultiStringMapMatcher) setMatches() []string { func (m *equalMultiStringMapMatcher) Matches(s string) bool { if !m.caseSensitive { - s = strings.ToLower(s) + s = toNormalisedLower(s) } _, ok := m.values[s] return ok } +// toNormalisedLower normalise the input string using "Unicode Normalization Form D" and then convert +// it to lower case. +func toNormalisedLower(s string) string { + // Check if the string is all ASCII chars and convert any upper case character to lower case character. + isASCII := true + var ( + b strings.Builder + pos int + ) + b.Grow(len(s)) + for i := 0; i < len(s); i++ { + c := s[i] + if isASCII && c >= utf8.RuneSelf { + isASCII = false + break + } + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + if pos < i { + b.WriteString(s[pos:i]) + } + b.WriteByte(c) + pos = i + 1 + } + } + if pos < len(s) { + b.WriteString(s[pos:]) + } + + // Optimize for ASCII-only strings. In this case we don't have to do any normalization. + if isASCII { + return b.String() + } + + // Normalise and convert to lower. 
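The NFKD-plus-lowercase step in the return statement that follows is what makes visually identical but differently encoded inputs compare equal, which plain strings.ToLower does not do. A small standalone example of the same expression (assuming golang.org/x/text is available, as the go.mod change above requires):

// Illustrative sketch: the same transformation as the non-ASCII path above,
// decompose with NFKD, then lower-case rune by rune.
package main

import (
	"fmt"
	"strings"
	"unicode"

	"golang.org/x/text/unicode/norm"
)

func normalisedLower(s string) string {
	return strings.Map(unicode.ToLower, norm.NFKD.String(s))
}

func main() {
	a := "Caf\u00e9"  // "Café" with a precomposed é (U+00E9)
	b := "CAFE\u0301" // "CAFÉ" with a combining acute accent (U+0301)
	fmt.Println(strings.ToLower(a) == strings.ToLower(b)) // false: byte sequences still differ
	fmt.Println(normalisedLower(a) == normalisedLower(b)) // true: both normalise to "cafe" + U+0301
}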
+ return strings.Map(unicode.ToLower, norm.NFKD.String(b.String())) +} + // anyStringWithoutNewlineMatcher is a stringMatcher which matches any string // (including an empty one) as far as it doesn't contain any newline character. type anyStringWithoutNewlineMatcher struct{} diff --git a/model/labels/regexp_test.go b/model/labels/regexp_test.go index 400b5721b7..c86a5cae41 100644 --- a/model/labels/regexp_test.go +++ b/model/labels/regexp_test.go @@ -70,6 +70,7 @@ var ( "jyyfj00j0061|jyyfj00j0062|jyyfj94j0093|jyyfj99j0093|jyyfm01j0021|jyyfm02j0021|jyefj00j0192|jyefj00j0193|jyefj00j0194|jyefj00j0195|jyefj00j0196|jyefj00j0197|jyefj00j0290|jyefj00j0291|jyefj00j0292|jyefj00j0293|jyefj00j0294|jyefj00j0295|jyefj00j0296|jyefj00j0297|jyefj89j0394|jyefj90j0394|jyefj91j0394|jyefj95j0347|jyefj96j0322|jyefj96j0347|jyefj97j0322|jyefj97j0347|jyefj98j0322|jyefj98j0347|jyefj99j0320|jyefj99j0322|jyefj99j0323|jyefj99j0335|jyefj99j0336|jyefj99j0344|jyefj99j0347|jyefj99j0349|jyefj99j0351|jyeff00j0117|lyyfm01j0025|lyyfm01j0028|lyyfm01j0041|lyyfm01j0133|lyyfm01j0701|lyyfm02j0025|lyyfm02j0028|lyyfm02j0041|lyyfm02j0133|lyyfm02j0701|lyyfm03j0701|lyefj00j0775|lyefj00j0776|lyefj00j0777|lyefj00j0778|lyefj00j0779|lyefj00j0780|lyefj00j0781|lyefj00j0782|lyefj50j3807|lyefj50j3852|lyefj51j3807|lyefj51j3852|lyefj52j3807|lyefj52j3852|lyefj53j3807|lyefj53j3852|lyefj54j3807|lyefj54j3852|lyefj54j3886|lyefj55j3807|lyefj55j3852|lyefj55j3886|lyefj56j3807|lyefj56j3852|lyefj56j3886|lyefj57j3807|lyefj57j3852|lyefj57j3886|lyefj58j3807|lyefj58j3852|lyefj58j3886|lyefj59j3807|lyefj59j3852|lyefj59j3886|lyefj60j3807|lyefj60j3852|lyefj60j3886|lyefj61j3807|lyefj61j3852|lyefj61j3886|lyefj62j3807|lyefj62j3852|lyefj62j3886|lyefj63j3807|lyefj63j3852|lyefj63j3886|lyefj64j3807|lyefj64j3852|lyefj64j3886|lyefj65j3807|lyefj65j3852|lyefj65j3886|lyefj66j3807|lyefj66j3852|lyefj66j3886|lyefj67j3807|lyefj67j3852|lyefj67j3886|lyefj68j3807|lyefj68j3852|lyefj68j3886|lyefj69j3807|lyefj69j3846|lyefj69j3852|lyefj69j3886|lyefj70j3807|lyefj70j3846|lyefj70j3852|lyefj70j3886|lyefj71j3807|lyefj71j3846|lyefj71j3852|lyefj71j3886|lyefj72j3807|lyefj72j3846|lyefj72j3852|lyefj72j3886|lyefj73j3807|lyefj73j3846|lyefj73j3852|lyefj73j3886|lyefj74j3807|lyefj74j3846|lyefj74j3852|lyefj74j3886|lyefj75j3807|lyefj75j3808|lyefj75j3846|lyefj75j3852|lyefj75j3886|lyefj76j3732|lyefj76j3807|lyefj76j3808|lyefj76j3846|lyefj76j3852|lyefj76j3886|lyefj77j3732|lyefj77j3807|lyefj77j3808|lyefj77j3846|lyefj77j3852|lyefj77j3886|lyefj78j3278|lyefj78j3732|lyefj78j3807|lyefj78j3808|lyefj78j3846|lyefj78j3852|lyefj78j3886|lyefj79j3732|lyefj79j3807|lyefj79j3808|lyefj79j3846|lyefj79j3852|lyefj79j3886|lyefj80j3732|lyefj80j3807|lyefj80j3808|lyefj80j3846|lyefj80j3852|lyefj80j3886|lyefj81j3732|lyefj81j3807|lyefj81j3808|lyefj81j3846|lyefj81j3852|lyefj81j3886|lyefj82j3732|lyefj82j3807|lyefj82j3808|lyefj82j3846|lyefj82j3852|lyefj82j3886|lyefj83j3732|lyefj83j3807|lyefj83j3808|lyefj83j3846|lyefj83j3852|lyefj83j3886|lyefj84j3732|lyefj84j3807|lyefj84j3808|lyefj84j3846|lyefj84j3852|lyefj84j3886|lyefj85j3732|lyefj85j3807|lyefj85j3808|lyefj85j3846|lyefj85j3852|lyefj85j3886|lyefj86j3278|lyefj86j3732|lyefj86j3807|lyefj86j3808|lyefj86j3846|lyefj86j3852|lyefj86j3886|lyefj87j3278|lyefj87j3732|lyefj87j3807|lyefj87j3808|lyefj87j3846|lyefj87j3852|lyefj87j3886|lyefj88j3732|lyefj88j3807|lyefj88j3808|lyefj88j3846|lyefj88j3852|lyefj88j3886|lyefj89j3732|lyefj89j3807|lyefj89j3808|lyefj89j3846|lyefj89j3852|lyefj89j3886|lyefj90j3732|lyefj90j3807|lyefj90j3808|lyefj90j3846|lyefj90j3852|lyefj90j3886|lyefj91j3732|lyefj91j3807|lyefj91j380
8|lyefj91j3846|lyefj91j3852|lyefj91j3886|lyefj92j3732|lyefj92j3807|lyefj92j3808|lyefj92j3846|lyefj92j3852|lyefj92j3886|lyefj93j3732|lyefj93j3807|lyefj93j3808|lyefj93j3846|lyefj93j3852|lyefj93j3885|lyefj93j3886|lyefj94j3525|lyefj94j3732|lyefj94j3807|lyefj94j3808|lyefj94j3846|lyefj94j3852|lyefj94j3885|lyefj94j3886|lyefj95j3525|lyefj95j3732|lyefj95j3807|lyefj95j3808|lyefj95j3846|lyefj95j3852|lyefj95j3886|lyefj96j3732|lyefj96j3803|lyefj96j3807|lyefj96j3808|lyefj96j3846|lyefj96j3852|lyefj96j3886|lyefj97j3333|lyefj97j3732|lyefj97j3792|lyefj97j3803|lyefj97j3807|lyefj97j3808|lyefj97j3838|lyefj97j3843|lyefj97j3846|lyefj97j3852|lyefj97j3886|lyefj98j3083|lyefj98j3333|lyefj98j3732|lyefj98j3807|lyefj98j3808|lyefj98j3838|lyefj98j3843|lyefj98j3846|lyefj98j3852|lyefj98j3873|lyefj98j3877|lyefj98j3882|lyefj98j3886|lyefj99j2984|lyefj99j3083|lyefj99j3333|lyefj99j3732|lyefj99j3807|lyefj99j3808|lyefj99j3846|lyefj99j3849|lyefj99j3852|lyefj99j3873|lyefj99j3877|lyefj99j3882|lyefj99j3884|lyefj99j3886|lyeff00j0106|lyeff00j0107|lyeff00j0108|lyeff00j0129|lyeff00j0130|lyeff00j0131|lyeff00j0132|lyeff00j0133|lyeff00j0134|lyeff00j0444|lyeff00j0445|lyeff91j0473|lyeff92j0473|lyeff92j3877|lyeff93j3877|lyeff94j0501|lyeff94j3525|lyeff94j3877|lyeff95j0501|lyeff95j3525|lyeff95j3877|lyeff96j0503|lyeff96j3877|lyeff97j3877|lyeff98j3333|lyeff98j3877|lyeff99j2984|lyeff99j3333|lyeff99j3877|mfyr9149ej|mfyr9149ek|mfyr9156ej|mfyr9156ek|mfyr9157ej|mfyr9157ek|mfyr9159ej|mfyr9159ek|mfyr9203ej|mfyr9204ej|mfyr9205ej|mfyr9206ej|mfyr9207ej|mfyr9207ek|mfyr9217ej|mfyr9217ek|mfyr9222ej|mfyr9222ek|mfyu0185ej|mfye9187ej|mfye9187ek|mfye9188ej|mfye9188ek|mfye9189ej|mfye9189ek|mfyf0185ej|oyefj87j0007|oyefj88j0007|oyefj89j0007|oyefj90j0007|oyefj91j0007|oyefj95j0001|oyefj96j0001|oyefj98j0004|oyefj99j0004|oyeff91j0004|oyeff92j0004|oyeff93j0004|oyeff94j0004|oyeff95j0004|oyeff96j0004|rklvyaxmany|ryefj93j0001|ryefj94j0001|tyyfj00a0001|tyyfj84j0005|tyyfj85j0005|tyyfj86j0005|tyyfj87j0005|tyyfj88j0005|tyyfj89j0005|tyyfj90j0005|tyyfj91j0005|tyyfj92j0005|tyyfj93j0005|tyyfj94j0005|tyyfj95j0005|tyyfj96j0005|tyyfj97j0005|tyyfj98j0005|tyyfj99j0005|tyefj50j0015|tyefj50j0017|tyefj50j0019|tyefj50j0020|tyefj50j0021|tyefj51j0015|tyefj51j0017|tyefj51j0019|tyefj51j0020|tyefj51j0021|tyefj52j0015|tyefj52j0017|tyefj52j0019|tyefj52j0020|tyefj52j0021|tyefj53j0015|tyefj53j0017|tyefj53j0019|tyefj53j0020|tyefj53j0021|tyefj54j0015|tyefj54j0017|tyefj54j0019|tyefj54j0020|tyefj54j0021|tyefj55j0015|tyefj55j0017|tyefj55j0019|tyefj55j0020|tyefj55j0021|tyefj56j0015|tyefj56j0017|tyefj56j0019|tyefj56j0020|tyefj56j0021|tyefj57j0015|tyefj57j0017|tyefj57j0019|tyefj57j0020|tyefj57j0021|tyefj58j0015|tyefj58j0017|tyefj58j0019|tyefj58j0020|tyefj58j0021|tyefj59j0015|tyefj59j0017|tyefj59j0019|tyefj59j0020|tyefj59j0021|tyefj60j0015|tyefj60j0017|tyefj60j0019|tyefj60j0020|tyefj60j0021|tyefj61j0015|tyefj61j0017|tyefj61j0019|tyefj61j0020|tyefj61j0021|tyefj62j0015|tyefj62j0017|tyefj62j0019|tyefj62j0020|tyefj62j0021|tyefj63j0015|tyefj63j0017|tyefj63j0019|tyefj63j0020|tyefj63j0021|tyefj64j0015|tyefj64j0017|tyefj64j0019|tyefj64j0020|tyefj64j0021|tyefj65j0015|tyefj65j0017|tyefj65j0019|tyefj65j0020|tyefj65j0021|tyefj66j0015|tyefj66j0017|tyefj66j0019|tyefj66j0020|tyefj66j0021|tyefj67j0015|tyefj67j0017|tyefj67j0019|tyefj67j0020|tyefj67j0021|tyefj68j0015|tyefj68j0017|tyefj68j0019|tyefj68j0020|tyefj68j0021|tyefj69j0015|tyefj69j0017|tyefj69j0019|tyefj69j0020|tyefj69j0021|tyefj70j0015|tyefj70j0017|tyefj70j0019|tyefj70j0020|tyefj70j0021|tyefj71j0015|tyefj71j0017|tyefj71j0019|tyefj71j0020|tyefj71j0021|tyefj72j0015|tyefj
72j0017|tyefj72j0019|tyefj72j0020|tyefj72j0021|tyefj72j0022|tyefj73j0015|tyefj73j0017|tyefj73j0019|tyefj73j0020|tyefj73j0021|tyefj73j0022|tyefj74j0015|tyefj74j0017|tyefj74j0019|tyefj74j0020|tyefj74j0021|tyefj74j0022|tyefj75j0015|tyefj75j0017|tyefj75j0019|tyefj75j0020|tyefj75j0021|tyefj75j0022|tyefj76j0015|tyefj76j0017|tyefj76j0019|tyefj76j0020|tyefj76j0021|tyefj76j0022|tyefj76j0119|tyefj77j0015|tyefj77j0017|tyefj77j0019|tyefj77j0020|tyefj77j0021|tyefj77j0022|tyefj77j0119|tyefj78j0015|tyefj78j0017|tyefj78j0019|tyefj78j0020|tyefj78j0021|tyefj78j0022|tyefj78j0119|tyefj79j0015|tyefj79j0017|tyefj79j0019|tyefj79j0020|tyefj79j0021|tyefj79j0022|tyefj79j0119|tyefj80j0015|tyefj80j0017|tyefj80j0019|tyefj80j0020|tyefj80j0021|tyefj80j0022|tyefj80j0114|tyefj80j0119|tyefj81j0015|tyefj81j0017|tyefj81j0019|tyefj81j0020|tyefj81j0021|tyefj81j0022|tyefj81j0114|tyefj81j0119|tyefj82j0015|tyefj82j0017|tyefj82j0019|tyefj82j0020|tyefj82j0021|tyefj82j0022|tyefj82j0119|tyefj83j0015|tyefj83j0017|tyefj83j0019|tyefj83j0020|tyefj83j0021|tyefj83j0022|tyefj83j0119|tyefj84j0014|tyefj84j0015|tyefj84j0017|tyefj84j0019|tyefj84j0020|tyefj84j0021|tyefj84j0022|tyefj84j0119|tyefj85j0014|tyefj85j0015|tyefj85j0017|tyefj85j0019|tyefj85j0020|tyefj85j0021|tyefj85j0022|tyefj85j0119|tyefj86j0014|tyefj86j0015|tyefj86j0017|tyefj86j0019|tyefj86j0020|tyefj86j0021|tyefj86j0022|tyefj87j0014|tyefj87j0015|tyefj87j0017|tyefj87j0019|tyefj87j0020|tyefj87j0021|tyefj87j0022|tyefj88j0014|tyefj88j0015|tyefj88j0017|tyefj88j0019|tyefj88j0020|tyefj88j0021|tyefj88j0022|tyefj88j0100|tyefj88j0115|tyefj89j0003|tyefj89j0014|tyefj89j0015|tyefj89j0017|tyefj89j0019|tyefj89j0020|tyefj89j0021|tyefj89j0022|tyefj89j0100|tyefj89j0115|tyefj90j0014|tyefj90j0015|tyefj90j0016|tyefj90j0017|tyefj90j0018|tyefj90j0019|tyefj90j0020|tyefj90j0021|tyefj90j0022|tyefj90j0100|tyefj90j0111|tyefj90j0115|tyefj91j0014|tyefj91j0015|tyefj91j0016|tyefj91j0017|tyefj91j0018|tyefj91j0019|tyefj91j0020|tyefj91j0021|tyefj91j0022|tyefj91j0100|tyefj91j0111|tyefj91j0115|tyefj92j0014|tyefj92j0015|tyefj92j0016|tyefj92j0017|tyefj92j0018|tyefj92j0019|tyefj92j0020|tyefj92j0021|tyefj92j0022|tyefj92j0100|tyefj92j0105|tyefj92j0115|tyefj92j0121|tyefj93j0004|tyefj93j0014|tyefj93j0015|tyefj93j0017|tyefj93j0018|tyefj93j0019|tyefj93j0020|tyefj93j0021|tyefj93j0022|tyefj93j0100|tyefj93j0105|tyefj93j0115|tyefj93j0121|tyefj94j0002|tyefj94j0004|tyefj94j0008|tyefj94j0014|tyefj94j0015|tyefj94j0017|tyefj94j0019|tyefj94j0020|tyefj94j0021|tyefj94j0022|tyefj94j0084|tyefj94j0088|tyefj94j0100|tyefj94j0106|tyefj94j0116|tyefj94j0121|tyefj94j0123|tyefj95j0002|tyefj95j0004|tyefj95j0008|tyefj95j0014|tyefj95j0015|tyefj95j0017|tyefj95j0019|tyefj95j0020|tyefj95j0021|tyefj95j0022|tyefj95j0084|tyefj95j0088|tyefj95j0100|tyefj95j0101|tyefj95j0106|tyefj95j0112|tyefj95j0116|tyefj95j0121|tyefj95j0123|tyefj96j0014|tyefj96j0015|tyefj96j0017|tyefj96j0019|tyefj96j0020|tyefj96j0021|tyefj96j0022|tyefj96j0082|tyefj96j0084|tyefj96j0100|tyefj96j0101|tyefj96j0112|tyefj96j0117|tyefj96j0121|tyefj96j0124|tyefj97j0014|tyefj97j0015|tyefj97j0017|tyefj97j0019|tyefj97j0020|tyefj97j0021|tyefj97j0022|tyefj97j0081|tyefj97j0087|tyefj97j0098|tyefj97j0100|tyefj97j0107|tyefj97j0109|tyefj97j0113|tyefj97j0117|tyefj97j0118|tyefj97j0121|tyefj98j0003|tyefj98j0006|tyefj98j0014|tyefj98j0015|tyefj98j0017|tyefj98j0019|tyefj98j0020|tyefj98j0021|tyefj98j0022|tyefj98j0083|tyefj98j0085|tyefj98j0086|tyefj98j0100|tyefj98j0104|tyefj98j0118|tyefj98j0121|tyefj99j0003|tyefj99j0006|tyefj99j0007|tyefj99j0014|tyefj99j0015|tyefj99j0017|tyefj99j0019|tyefj99j0020|tyefj99j0021|tyefj99j002
2|tyefj99j0023|tyefj99j0100|tyefj99j0108|tyefj99j0110|tyefj99j0121|tyefj99j0125|tyeff94j0002|tyeff94j0008|tyeff94j0010|tyeff94j0011|tyeff94j0035|tyeff95j0002|tyeff95j0006|tyeff95j0008|tyeff95j0010|tyeff95j0011|tyeff95j0035|tyeff96j0003|tyeff96j0006|tyeff96j0009|tyeff96j0010|tyeff97j0004|tyeff97j0009|tyeff97j0116|tyeff98j0007|tyeff99j0007|tyeff99j0125|uyyfj00j0484|uyyfj00j0485|uyyfj00j0486|uyyfj00j0487|uyyfj00j0488|uyyfj00j0489|uyyfj00j0490|uyyfj00j0491|uyyfj00j0492|uyyfj00j0493|uyyfj00j0494|uyyfj00j0495|uyyfj00j0496|uyyfj00j0497|uyyfj00j0498|uyyfj00j0499|uyyfj00j0500|uyyfj00j0501|uyyfj00j0502|uyyfj00j0503|uyyfj00j0504|uyyfj00j0505|uyyfj00j0506|uyyfj00j0507|uyyfj00j0508|uyyfj00j0509|uyyfj00j0510|uyyfj00j0511|uyyfj00j0512|uyyfj00j0513|uyyfj00j0514|uyyfj00j0515|uyyfj00j0516|uyyfj00j0517|uyyfj00j0518|uyyfj00j0519|uyyfj00j0520|uyyfj00j0521|uyyfj00j0522|uyyfj00j0523|uyyfj00j0524|uyyfj00j0525|uyyfj00j0526|uyyfj00j0527|uyyfj00j0528|uyyfj00j0529|uyyfj00j0530|uyyfj00j0531|uyyfj00j0532|uyyfj00j0533|uyyfj00j0534|uyyfj00j0535|uyyfj00j0536|uyyfj00j0537|uyyfj00j0538|uyyfj00j0539|uyyfj00j0540|uyyfj00j0541|uyyfj00j0542|uyyfj00j0543|uyyfj00j0544|uyyfj00j0545|uyyfj00j0546|uyyfj00j0547|uyyfj00j0548|uyyfj00j0549|uyyfj00j0550|uyyfj00j0551|uyyfj00j0553|uyyfj00j0554|uyyfj00j0555|uyyfj00j0556|uyyfj00j0557|uyyfj00j0558|uyyfj00j0559|uyyfj00j0560|uyyfj00j0561|uyyfj00j0562|uyyfj00j0563|uyyfj00j0564|uyyfj00j0565|uyyfj00j0566|uyyfj00j0614|uyyfj00j0615|uyyfj00j0616|uyyfj00j0617|uyyfj00j0618|uyyfj00j0619|uyyfj00j0620|uyyfj00j0621|uyyfj00j0622|uyyfj00j0623|uyyfj00j0624|uyyfj00j0625|uyyfj00j0626|uyyfj00j0627|uyyfj00j0628|uyyfj00j0629|uyyfj00j0630|uyyfj00j0631|uyyfj00j0632|uyyfj00j0633|uyyfj00j0634|uyyfj00j0635|uyyfj00j0636|uyyfj00j0637|uyyfj00j0638|uyyfj00j0639|uyyfj00j0640|uyyfj00j0641|uyyfj00j0642|uyyfj00j0643|uyyfj00j0644|uyyfj00j0645|uyyfj00j0646|uyyfj00j0647|uyyfj00j0648|uyyfj00j0649|uyyfj00j0650|uyyfj00j0651|uyyfj00j0652|uyyfj00j0653|uyyfj00j0654|uyyfj00j0655|uyyfj00j0656|uyyfj00j0657|uyyfj00j0658|uyyfj00j0659|uyyfj00j0660|uyyfj00j0661|uyyfj00j0662|uyyfj00j0663|uyyfj00j0664|uyyfj00j0665|uyyfj00j0666|uyyfj00j0667|uyyfj00j0668|uyyfj00j0669|uyyfj00j0670|uyyfj00j0671|uyyfj00j0672|uyyfj00j0673|uyyfj00j0674|uyyfj00j0675|uyyfj00j0676|uyyfj00j0677|uyyfj00j0678|uyyfj00j0679|uyyfj00j0680|uyyfj00j0681|uyyfj00j0682|uyyfj00j0683|uyyfj00j0684|uyyfj00j0685|uyyfj00j0686|uyyfj00j0687|uyyfj00j0688|uyyfj00j0689|uyyfj00j0690|uyyfj00j0691|uyyfj00j0692|uyyfj00j0693|uyyfj00j0694|uyyfj00j0695|uyyfj00j0696|uyyfj00j0697|uyyfj00j0698|uyyfj00j0699|uyyfj00j0700|uyyfj00j0701|uyyfj00j0702|uyyfj00j0703|uyyfj00j0704|uyyfj00j0705|uyyfj00j0706|uyyfj00j0707|uyyfj00j0708|uyyfj00j0709|uyyfj00j0710|uyyfj00j0711|uyyfj00j0712|uyyfj00j0713|uyyfj00j0714|uyyfj00j0715|uyyfj00j0716|uyyfj00j0717|uyyfj00j0718|uyyfj00j0719|uyyfj00j0720|uyyfj00j0721|uyyfj00j0722|uyyfj00j0723|uyyfj00j0724|uyyfj00j0725|uyyfj00j0726|uyyfj00j0727|uyyfj00j0728|uyyfj00j0729|uyyfj00j0730|uyyfj00j0731|uyyfj00j0732|uyyfj00j0733|uyyfj00j0734|uyyfj00j0735|uyyfj00j0736|uyyfj00j0737|uyyfj00j0738|uyyfj00j0739|uyyfj00j0740|uyyfj00j0741|uyyfj00j0742|uyyfj00j0743|uyyfj00j0744|uyyfj00j0745|uyyfj00j0746|uyyfj00j0747|uyyfj00j0748|uyyfj00j0749|uyyfj00j0750|uyyfj00j0751|uyyfj00j0752|uyyfj00j0753|uyyfj00j0754|uyyfj00j0755|uyyfj00j0756|uyyfj00j0757|uyyfj00j0758|uyyfj00j0759|uyyfj00j0760|uyyfj00j0761|uyyfj00j0762|uyyfj00j0763|uyyfj00j0764|uyyfj00j0765|uyyfj00j0766|uyyfj00j0767|uyyfj00j0768|uyyfj00j0769|uyyfj00j0770|uyyfj00j0771|uyyfj00j0772|uyyfj00j0773|uyyfj00j0774|uyyfj00j0775|uyyfj00j0776|uyyfj00j0777|uyyf
j00j0778|uyyfj00j0779|uyyfj00j0780|uyyfj00j0781|uyyfj00j0782|uyyff00j0011|uyyff00j0031|uyyff00j0032|uyyff00j0033|uyyff00j0034|uyyff99j0012|uyefj00j0071|uyefj00j0455|uyefj00j0456|uyefj00j0582|uyefj00j0583|uyefj00j0584|uyefj00j0585|uyefj00j0586|uyefj00j0590|uyeff00j0188|xyrly-f-jyy-y01|xyrly-f-jyy-y02|xyrly-f-jyy-y03|xyrly-f-jyy-y04|xyrly-f-jyy-y05|xyrly-f-jyy-y06|xyrly-f-jyy-y07|xyrly-f-jyy-y08|xyrly-f-jyy-y09|xyrly-f-jyy-y10|xyrly-f-jyy-y11|xyrly-f-jyy-y12|xyrly-f-jyy-y13|xyrly-f-jyy-y14|xyrly-f-jyy-y15|xyrly-f-jyy-y16|xyrly-f-url-y01|xyrly-f-url-y02|yyefj97j0005|ybyfcy4000|ybyfcy4001|ayefj99j0035|by-b-y-bzu-l01|by-b-y-bzu-l02|by-b-e-079|by-b-e-080|by-b-e-082|by-b-e-083|byefj72j0002|byefj73j0002|byefj74j0002|byefj75j0002|byefj76j0002|byefj77j0002|byefj78j0002|byefj79j0002|byefj91j0007|byefj92j0007|byefj98j0003|byefj99j0003|byefj99j0005|byefj99j0006|byeff88j0002|byeff89j0002|byeff90j0002|byeff91j0002|byeff92j0002|byeff93j0002|byeff96j0003|byeff97j0003|byeff98j0003|byeff99j0003|fymfj98j0001|fymfj99j0001|fyyaj98k0297|fyyaj99k0297|fyyfj00j0109|fyyfj00j0110|fyyfj00j0122|fyyfj00j0123|fyyfj00j0201|fyyfj00j0202|fyyfj00j0207|fyyfj00j0208|fyyfj00j0227|fyyfj00j0228|fyyfj00j0229|fyyfj00j0230|fyyfj00j0231|fyyfj00j0232|fyyfj00j0233|fyyfj00j0234|fyyfj00j0235|fyyfj00j0236|fyyfj00j0237|fyyfj00j0238|fyyfj00j0239|fyyfj00j0240|fyyfj00j0241|fyyfj00j0242|fyyfj00j0243|fyyfj00j0244|fyyfj00j0245|fyyfj00j0246|fyyfj00j0247|fyyfj00j0248|fyyfj00j0249|fyyfj00j0250|fyyfj00j0251|fyyfj00j0252|fyyfj00j0253|fyyfj00j0254|fyyfj00j0255|fyyfj00j0256|fyyfj00j0257|fyyfj00j0258|fyyfj00j0259|fyyfj00j0260|fyyfj00j0261|fyyfj00j0262|fyyfj00j0263|fyyfj00j0264|fyyfj00j0265|fyyfj00j0266|fyyfj00j0267|fyyfj00j0268|fyyfj00j0290|fyyfj00j0291|fyyfj00j0292|fyyfj00j0293|fyyfj00j0294|fyyfj00j0295|fyyfj00j0296|fyyfj00j0297|fyyfj00j0298|fyyfj00j0299|fyyfj00j0300|fyyfj00j0301|fyyfj00j0302|fyyfj00j0303|fyyfj00j0304|fyyfj00j0305|fyyfj00j0306|fyyfj00j0307|fyyfj00j0308|fyyfj00j0309|fyyfj00j0310|fyyfj00j0311|fyyfj00j0312|fyyfj00j0313|fyyfj00j0314|fyyfj00j0315|fyyfj00j0316|fyyfj00j0317|fyyfj00j0318|fyyfj00j0319|fyyfj00j0320|fyyfj00j0321|fyyfj00j0322|fyyfj00j0323|fyyfj00j0324|fyyfj00j0325|fyyfj00j0326|fyyfj00j0327|fyyfj00j0328|fyyfj00j0329|fyyfj00j0330|fyyfj00j0331|fyyfj00j0332|fyyfj00j0333|fyyfj00j0334|fyyfj00j0335|fyyfj00j0340|fyyfj00j0341|fyyfj00j0342|fyyfj00j0343|fyyfj00j0344|fyyfj00j0345|fyyfj00j0346|fyyfj00j0347|fyyfj00j0348|fyyfj00j0349|fyyfj00j0367|fyyfj00j0368|fyyfj00j0369|fyyfj00j0370|fyyfj00j0371|fyyfj00j0372|fyyfj00j0373|fyyfj00j0374|fyyfj00j0375|fyyfj00j0376|fyyfj00j0377|fyyfj00j0378|fyyfj00j0379|fyyfj00j0380|fyyfj00j0381|fyyfj00j0382|fyyfj00j0383|fyyfj00j0384|fyyfj00j0385|fyyfj00j0386|fyyfj00j0387|fyyfj00j0388|fyyfj00j0415|fyyfj00j0416|fyyfj00j0417|fyyfj00j0418|fyyfj00j0419|fyyfj00j0420|fyyfj00j0421|fyyfj00j0422|fyyfj00j0423|fyyfj00j0424|fyyfj00j0425|fyyfj00j0426|fyyfj00j0427|fyyfj00j0428|fyyfj00j0429|fyyfj00j0430|fyyfj00j0431|fyyfj00j0432|fyyfj00j0433|fyyfj00j0434|fyyfj00j0435|fyyfj00j0436|fyyfj00j0437|fyyfj00j0438|fyyfj00j0439|fyyfj00j0440|fyyfj00j0441|fyyfj00j0446|fyyfj00j0447|fyyfj00j0448|fyyfj00j0449|fyyfj00j0451|fyyfj00j0452|fyyfj00j0453|fyyfj00j0454|fyyfj00j0455|fyyfj00j0456|fyyfj00j0457|fyyfj00j0459|fyyfj00j0460|fyyfj00j0461|fyyfj00j0462|fyyfj00j0463|fyyfj00j0464|fyyfj00j0465|fyyfj00j0466|fyyfj00j0467|fyyfj00j0468|fyyfj00j0469|fyyfj00j0470|fyyfj00j0471|fyyfj00j0474|fyyfj00j0475|fyyfj00j0476|fyyfj00j0477|fyyfj00j0478|fyyfj00j0479|fyyfj00j0480|fyyfj00j0481|fyyfj00j0482|fyyfj00j0483|fyyfj00j0484|fyyfj00j0485|fyyfj00j0486|fyyfj00j0487|fyy
fj00j0488|fyyfj00j0489|fyyfj00j0490|fyyfj00j0491|fyyfj00j0492|fyyfj00j0493|fyyfj00j0494|fyyfj00j0495|fyyfj00j0496|fyyfj00j0497|fyyfj00j0498|fyyfj00j0499|fyyfj00j0500|fyyfj00j0501|fyyfj00j0502|fyyfj00j0503|fyyfj00j0504|fyyfj00j0505|fyyfj00j0506|fyyfj00j0507|fyyfj00j0508|fyyfj00j0509|fyyfj00j0510|fyyfj00j0511|fyyfj00j0512|fyyfj00j0513|fyyfj00j0514|fyyfj00j0515|fyyfj00j0516|fyyfj00j0517|fyyfj00j0518|fyyfj00j0521|fyyfj00j0522|fyyfj00j0523|fyyfj00j0524|fyyfj00j0526|fyyfj00j0527|fyyfj00j0528|fyyfj00j0529|fyyfj00j0530|fyyfj00j0531|fyyfj00j0532|fyyfj00j0533|fyyfj00j0534|fyyfj00j0535|fyyfj00j0536|fyyfj00j0537|fyyfj00j0538|fyyfj00j0539|fyyfj00j0540|fyyfj00j0541|fyyfj00j0542|fyyfj00j0543|fyyfj00j0544|fyyfj00j0545|fyyfj00j0546|fyyfj00j0564|fyyfj00j0565|fyyfj00j0566|fyyfj00j0567|fyyfj00j0568|fyyfj00j0569|fyyfj00j0570|fyyfj00j0571|fyyfj00j0572|fyyfj00j0574|fyyfj00j0575|fyyfj00j0576|fyyfj00j0577|fyyfj00j0578|fyyfj00j0579|fyyfj00j0580|fyyfj01j0473|fyyfj02j0473|fyyfj36j0289|fyyfj37j0209|fyyfj37j0289|fyyfj38j0209|fyyfj38j0289|fyyfj39j0209|fyyfj39j0289|fyyfj40j0209|fyyfj40j0289|fyyfj41j0209|fyyfj41j0289|fyyfj42j0209|fyyfj42j0289|fyyfj43j0209|fyyfj43j0289|fyyfj44j0209|fyyfj44j0289|fyyfj45j0104|fyyfj45j0209|fyyfj45j0289|fyyfj46j0104|fyyfj46j0209|fyyfj46j0289|fyyfj47j0104|fyyfj47j0209|fyyfj47j0289|fyyfj48j0104|fyyfj48j0209|fyyfj48j0289|fyyfj49j0104|fyyfj49j0209|fyyfj49j0289|fyyfj50j0104|fyyfj50j0209|fyyfj50j0289|fyyfj50j0500|fyyfj51j0104|fyyfj51j0209|fyyfj51j0289|fyyfj51j0500|fyyfj52j0104|fyyfj52j0209|fyyfj52j0289|fyyfj52j0500|fyyfj53j0104|fyyfj53j0209|fyyfj53j0289|fyyfj53j0500|fyyfj54j0104|fyyfj54j0209|fyyfj54j0289|fyyfj54j0500|fyyfj55j0104|fyyfj55j0209|fyyfj55j0289|fyyfj55j0500|fyyfj56j0104|fyyfj56j0209|fyyfj56j0289|fyyfj56j0500|fyyfj57j0104|fyyfj57j0209|fyyfj57j0289|fyyfj57j0500|fyyfj58j0104|fyyfj58j0209|fyyfj58j0289|fyyfj58j0500|fyyfj59j0104|fyyfj59j0209|fyyfj59j0289|fyyfj59j0500|fyyfj60j0104|fyyfj60j0209|fyyfj60j0289|fyyfj60j0500|fyyfj61j0104|fyyfj61j0209|fyyfj61j0289|fyyfj61j0500|fyyfj62j0104|fyyfj62j0209|fyyfj62j0289|fyyfj62j0500|fyyfj63j0104|fyyfj63j0209|fyyfj63j0289|fyyfj63j0500|fyyfj64j0104|fyyfj64j0107|fyyfj64j0209|fyyfj64j0289|fyyfj64j0500|fyyfj64j0573|fyyfj65j0104|fyyfj65j0107|fyyfj65j0209|fyyfj65j0289|fyyfj65j0500|fyyfj65j0573|fyyfj66j0104|fyyfj66j0107|fyyfj66j0209|fyyfj66j0289|fyyfj66j0500|fyyfj66j0573|fyyfj67j0104|fyyfj67j0107|fyyfj67j0209|fyyfj67j0289|fyyfj67j0500|fyyfj67j0573|fyyfj68j0104|fyyfj68j0107|fyyfj68j0209|fyyfj68j0289|fyyfj68j0500|fyyfj68j0573|fyyfj69j0104|fyyfj69j0107|fyyfj69j0209|fyyfj69j0289|fyyfj69j0500|fyyfj69j0573|fyyfj70j0104|fyyfj70j0107|fyyfj70j0209|fyyfj70j0289|fyyfj70j0472|fyyfj70j0500|fyyfj70j0573|fyyfj71j0104|fyyfj71j0107|fyyfj71j0209|fyyfj71j0289|fyyfj71j0472|fyyfj71j0500|fyyfj71j0573|fyyfj72j0104|fyyfj72j0107|fyyfj72j0209|fyyfj72j0289|fyyfj72j0472|fyyfj72j0500|fyyfj72j0573|fyyfj73j0104|fyyfj73j0107|fyyfj73j0209|fyyfj73j0289|fyyfj73j0472|fyyfj73j0500|fyyfj73j0573|fyyfj74j0104|fyyfj74j0107|fyyfj74j0209|fyyfj74j0289|fyyfj74j0472|fyyfj74j0500|fyyfj74j0573|fyyfj75j0104|fyyfj75j0107|fyyfj75j0108|fyyfj75j0209|fyyfj75j0289|fyyfj75j0472|fyyfj75j0500|fyyfj75j0573|fyyfj76j0104|fyyfj76j0107|fyyfj76j0108|fyyfj76j0209|fyyfj76j0289|fyyfj76j0472|fyyfj76j0500|fyyfj76j0573|fyyfj77j0104|fyyfj77j0107|fyyfj77j0108|fyyfj77j0209|fyyfj77j0289|fyyfj77j0472|fyyfj77j0500|fyyfj77j0573|fyyfj78j0104|fyyfj78j0107|fyyfj78j0108|fyyfj78j0209|fyyfj78j0289|fyyfj78j0472|fyyfj78j0500|fyyfj78j0573|fyyfj79j0104|fyyfj79j0107|fyyfj79j0108|fyyfj79j0209|fyyfj79j0289|fyyfj79j0339|fyyfj79j0472|fyyfj79j0500|fyyfj79j0
573|fyyfj80j0104|fyyfj80j0107|fyyfj80j0108|fyyfj80j0209|fyyfj80j0289|fyyfj80j0339|fyyfj80j0352|fyyfj80j0472|fyyfj80j0500|fyyfj80j0573|fyyfj81j0104|fyyfj81j0107|fyyfj81j0108|fyyfj81j0209|fyyfj81j0289|fyyfj81j0339|fyyfj81j0352|fyyfj81j0472|fyyfj81j0500|fyyfj81j0573|fyyfj82j0104|fyyfj82j0107|fyyfj82j0108|fyyfj82j0209|fyyfj82j0289|fyyfj82j0339|fyyfj82j0352|fyyfj82j0472|fyyfj82j0500|fyyfj82j0573|fyyfj83j0104|fyyfj83j0107|fyyfj83j0108|fyyfj83j0209|fyyfj83j0289|fyyfj83j0339|fyyfj83j0352|fyyfj83j0472|fyyfj83j0500|fyyfj83j0573|fyyfj84j0104|fyyfj84j0107|fyyfj84j0108|fyyfj84j0209|fyyfj84j0289|fyyfj84j0339|fyyfj84j0352|fyyfj84j0472|fyyfj84j0500|fyyfj84j0573|fyyfj85j0104|fyyfj85j0107|fyyfj85j0108|fyyfj85j0209|fyyfj85j0289|fyyfj85j0301|fyyfj85j0339|fyyfj85j0352|fyyfj85j0472|fyyfj85j0500|fyyfj85j0573|fyyfj86j0104|fyyfj86j0107|fyyfj86j0108|fyyfj86j0209|fyyfj86j0289|fyyfj86j0301|fyyfj86j0339|fyyfj86j0352|fyyfj86j0472|fyyfj86j0500|fyyfj86j0573|fyyfj87j0067|fyyfj87j0104|fyyfj87j0107|fyyfj87j0108|fyyfj87j0209|fyyfj87j0289|fyyfj87j0301|fyyfj87j0339|fyyfj87j0352|fyyfj87j0472|fyyfj87j0500|fyyfj87j0573|fyyfj88j0067|fyyfj88j0104|fyyfj88j0107|fyyfj88j0108|fyyfj88j0209|fyyfj88j0289|fyyfj88j0301|fyyfj88j0339|fyyfj88j0352|fyyfj88j0472|fyyfj88j0500|fyyfj88j0573|fyyfj89j0067|fyyfj89j0104|fyyfj89j0107|fyyfj89j0108|fyyfj89j0209|fyyfj89j0289|fyyfj89j0301|fyyfj89j0339|fyyfj89j0352|fyyfj89j0358|fyyfj89j0472|fyyfj89j0500|fyyfj89j0573|fyyfj90j0067|fyyfj90j0104|fyyfj90j0107|fyyfj90j0108|fyyfj90j0209|fyyfj90j0289|fyyfj90j0301|fyyfj90j0321|fyyfj90j0339|fyyfj90j0352|fyyfj90j0358|fyyfj90j0452|fyyfj90j0472|fyyfj90j0500|fyyfj90j0573|fyyfj91j0067|fyyfj91j0104|fyyfj91j0107|fyyfj91j0108|fyyfj91j0209|fyyfj91j0289|fyyfj91j0301|fyyfj91j0321|fyyfj91j0339|fyyfj91j0352|fyyfj91j0358|fyyfj91j0452|fyyfj91j0472|fyyfj91j0500|fyyfj91j0573|fyyfj92j0067|fyyfj92j0104|fyyfj92j0107|fyyfj92j0108|fyyfj92j0209|fyyfj92j0289|fyyfj92j0301|fyyfj92j0321|fyyfj92j0339|fyyfj92j0352|fyyfj92j0358|fyyfj92j0452|fyyfj92j0472|fyyfj92j0500|fyyfj92j0573|fyyfj93j0067|fyyfj93j0099|fyyfj93j0104|fyyfj93j0107|fyyfj93j0108|fyyfj93j0209|fyyfj93j0289|fyyfj93j0301|fyyfj93j0321|fyyfj93j0352|fyyfj93j0358|fyyfj93j0452|fyyfj93j0472|fyyfj93j0500|fyyfj93j0573|fyyfj94j0067|fyyfj94j0099|fyyfj94j0104|fyyfj94j0107|fyyfj94j0108|fyyfj94j0209|fyyfj94j0211|fyyfj94j0289|fyyfj94j0301|fyyfj94j0321|fyyfj94j0352|fyyfj94j0358|fyyfj94j0359|fyyfj94j0452|fyyfj94j0472|fyyfj94j0500|fyyfj94j0573|fyyfj95j0067|fyyfj95j0099|fyyfj95j0104|fyyfj95j0107|fyyfj95j0108|fyyfj95j0209|fyyfj95j0211|fyyfj95j0289|fyyfj95j0298|fyyfj95j0301|fyyfj95j0321|fyyfj95j0339|fyyfj95j0352|fyyfj95j0358|fyyfj95j0359|fyyfj95j0414|fyyfj95j0452|fyyfj95j0472|fyyfj95j0500|fyyfj95j0573|fyyfj96j0067|fyyfj96j0099|fyyfj96j0104|fyyfj96j0107|fyyfj96j0108|fyyfj96j0209|fyyfj96j0211|fyyfj96j0289|fyyfj96j0298|fyyfj96j0301|fyyfj96j0321|fyyfj96j0339|fyyfj96j0352|fyyfj96j0358|fyyfj96j0359|fyyfj96j0414|fyyfj96j0452|fyyfj96j0472|fyyfj96j0500|fyyfj96j0573|fyyfj97j0067|fyyfj97j0099|fyyfj97j0100|fyyfj97j0104|fyyfj97j0107|fyyfj97j0108|fyyfj97j0209|fyyfj97j0211|fyyfj97j0289|fyyfj97j0298|fyyfj97j0301|fyyfj97j0321|fyyfj97j0339|fyyfj97j0352|fyyfj97j0358|fyyfj97j0359|fyyfj97j0414|fyyfj97j0445|fyyfj97j0452|fyyfj97j0472|fyyfj97j0500|fyyfj97j0573|fyyfj98j0067|fyyfj98j0099|fyyfj98j0100|fyyfj98j0104|fyyfj98j0107|fyyfj98j0108|fyyfj98j0178|fyyfj98j0209|fyyfj98j0211|fyyfj98j0289|fyyfj98j0298|fyyfj98j0301|fyyfj98j0303|fyyfj98j0321|fyyfj98j0339|fyyfj98j0352|fyyfj98j0358|fyyfj98j0359|fyyfj98j0413|fyyfj98j0414|fyyfj98j0445|fyyfj98j0452|fyyfj98j0472|fyyfj98j0500|fyyfj98j0573|fy
yfj99j0067|fyyfj99j0099|fyyfj99j0100|fyyfj99j0104|fyyfj99j0107|fyyfj99j0108|fyyfj99j0131|fyyfj99j0209|fyyfj99j0211|fyyfj99j0285|fyyfj99j0289|fyyfj99j0298|fyyfj99j0301|fyyfj99j0303|fyyfj99j0321|fyyfj99j0339|fyyfj99j0352|fyyfj99j0358|fyyfj99j0359|fyyfj99j0413|fyyfj99j0414|fyyfj99j0445|fyyfj99j0452|fyyfj99j0472|fyyfj99j0500|fyyfj99j0573|fyyfm01j0064|fyyfm01j0070|fyyfm01j0071|fyyfm01j0088|fyyfm01j0091|fyyfm01j0108|fyyfm01j0111|fyyfm01j0112|fyyfm01j0114|fyyfm01j0115|fyyfm01j0133|fyyfm01j0140|fyyfm01j0141|fyyfm01j0142|fyyfm01j0143|fyyfm01j0148|fyyfm01j0149|fyyfm01j0152|fyyfm01j0153|fyyfm01j0155|fyyfm01j0159|fyyfm01j0160|fyyfm01j0163|fyyfm01j0165|fyyfm01j0168|fyyfm01j0169|fyyfm01j0221|fyyfm01j0223|fyyfm01j0268|fyyfm01j0271|fyyfm01j0285|fyyfm01j0299|fyyfm01j0320|fyyfm01j0321|fyyfm01j0360|fyyfm01j0369|fyyfm01j0400|fyyfm01j0401|fyyfm01j0411|fyyfm01j0572|fyyfm01j0765|fyyfm02j0064|fyyfm02j0069|fyyfm02j0070|fyyfm02j0071|fyyfm02j0088|fyyfm02j0091|fyyfm02j0108|fyyfm02j0111|fyyfm02j0112|fyyfm02j0114|fyyfm02j0115|fyyfm02j0133|fyyfm02j0140|fyyfm02j0141|fyyfm02j0142|fyyfm02j0143|fyyfm02j0148|fyyfm02j0149|fyyfm02j0152|fyyfm02j0153|fyyfm02j0155|fyyfm02j0159|fyyfm02j0160|fyyfm02j0163|fyyfm02j0165|fyyfm02j0168|fyyfm02j0169|fyyfm02j0221|fyyfm02j0223|fyyfm02j0268|fyyfm02j0271|fyyfm02j0285|fyyfm02j0299|fyyfm02j0320|fyyfm02j0321|fyyfm02j0360|fyyfm02j0369|fyyfm02j0400|fyyfm02j0572|fyyfm02j0765|fyyfm03j0064|fyyfm03j0070|fyyfm03j0091|fyyfm03j0108|fyyfm03j0111|fyyfm03j0115|fyyfm03j0160|fyyfm03j0165|fyyfm03j0299|fyyfm03j0400|fyyfm03j0572|fyyfm04j0111|fyyfm51j0064|fyyfm51j0369|fyyfm52j0064|fyyfm52j0369|fyyfr88j0003|fyyfr89j0003|fyyff98j0071|fyyff98j0303|fyyff99j0029|fyyff99j0303|fyefj00j0112|fyefj00j0545|fyefj00j0546|fyefj00j0633|fyefj00j0634|fyefj00j0635|fyefj00j0636|fyefj00j0637|fyefj00j0649|fyefj00j0651|fyefj00j0652|fyefj00j0656|fyefj00j0657|fyefj00j0658|fyefj00j0659|fyefj00j0660|fyefj00j0685|fyefj00j0686|fyefj00j0688|fyefj00j0701|fyefj00j0702|fyefj00j0703|fyefj00j0715|fyefj00j0720|fyefj00j0721|fyefj00j0722|fyefj00j0724|fyefj00j0725|fyefj00j0726|fyefj00j0731|fyefj00j0751|fyefj00j0752|fyefj00j0756|fyefj00j0757|fyefj00j0758|fyefj00j0759|fyefj00j0761|fyefj00j0762|fyefj00j0763|fyefj00j0764|fyefj00j0768|fyefj00j0769|fyefj00j0785|fyefj00j0786|fyefj00j0789|fyefj00j0790|fyefj00j0793|fyefj00j0794|fyefj00j0803|fyefj00j0811|fyefj00j0821|fyefj00j0822|fyefj00j0823|fyefj00j0824|fyefj00j0825|fyefj00j0826|fyefj00j0827|fyefj00j0828|fyefj00j0829|fyefj00j0831|fyefj00j0832|fyefj00j0833|fyefj00j0838|fyefj00j0839|fyefj00j0840|fyefj00j0854|fyefj00j0855|fyefj00j0856|fyefj00j0859|fyefj00j0860|fyefj00j0861|fyefj00j0869|fyefj00j0870|fyefj00j0879|fyefj00j0887|fyefj00j0888|fyefj00j0889|fyefj00j0900|fyefj00j0901|fyefj00j0903|fyefj00j0904|fyefj00j0905|fyefj00j0959|fyefj00j0960|fyefj00j0961|fyefj00j1004|fyefj00j1005|fyefj00j1012|fyefj00j1013|fyefj00j1014|fyefj00j1015|fyefj00j1016|fyefj00j1017|fyefj00j1018|fyefj00j1019|fyefj00j1020|fyefj00j1021|fyefj00j1218|fyefj00j1219|fyefj00j1220|fyefj00j1221|fyefj00j1222|fyefj00j1811|fyefj00j1854|fyefj00j1855|fyefj00j1856|fyefj01j0707|fyefj02j0707|fyefj03j0707|fyefj66j0001|fyefj67j0001|fyefj68j0001|fyefj68j1064|fyefj69j0001|fyefj69j1064|fyefj70j0001|fyefj70j0859|fyefj70j1064|fyefj71j0001|fyefj71j1064|fyefj72j0001|fyefj72j1064|fyefj73j0001|fyefj73j1064|fyefj74j0001|fyefj74j1064|fyefj75j0001|fyefj75j1064|fyefj75j1092|fyefj76j0001|fyefj76j1064|fyefj76j1092|fyefj77j0001|fyefj77j1064|fyefj77j1092|fyefj78j0001|fyefj78j1064|fyefj78j1092|fyefj79j0001|fyefj79j1064|fyefj79j1092|fyefj80j0001|fyefj80j0859|fyefj80j1064|fyefj80j
1077|fyefj80j1092|fyefj81j0001|fyefj81j1064|fyefj81j1077|fyefj81j1092|fyefj82j0001|fyefj82j1064|fyefj82j1092|fyefj83j0001|fyefj83j1064|fyefj83j1092|fyefj84j0001|fyefj84j1064|fyefj84j1092|fyefj85j0001|fyefj85j0356|fyefj85j1064|fyefj85j1092|fyefj86j0001|fyefj86j0356|fyefj86j1064|fyefj87j0001|fyefj87j0356|fyefj87j1064|fyefj88j0001|fyefj88j0356|fyefj88j1064|fyefj89j0001|fyefj89j0356|fyefj89j1064|fyefj89j1067|fyefj90j0001|fyefj90j0758|fyefj90j1021|fyefj90j1064|fyefj90j1067|fyefj91j0001|fyefj91j0758|fyefj91j0791|fyefj91j1021|fyefj91j1064|fyefj91j1067|fyefj91j1077|fyefj92j0001|fyefj92j0359|fyefj92j0678|fyefj92j0758|fyefj92j0791|fyefj92j0867|fyefj92j1021|fyefj92j1064|fyefj92j1077|fyefj93j0001|fyefj93j0359|fyefj93j0678|fyefj93j0758|fyefj93j0791|fyefj93j0867|fyefj93j1010|fyefj93j1021|fyefj93j1049|fyefj93j1064|fyefj93j1077|fyefj94j0001|fyefj94j0678|fyefj94j0758|fyefj94j0791|fyefj94j0867|fyefj94j1010|fyefj94j1021|fyefj94j1049|fyefj94j1064|fyefj94j1070|fyefj94j1077|fyefj94j1085|fyefj95j0001|fyefj95j0678|fyefj95j0758|fyefj95j0791|fyefj95j0867|fyefj95j0965|fyefj95j0966|fyefj95j1010|fyefj95j1011|fyefj95j1021|fyefj95j1055|fyefj95j1064|fyefj95j1069|fyefj95j1077|fyefj95j1085|fyefj95j1089|fyefj96j0001|fyefj96j0106|fyefj96j0671|fyefj96j0678|fyefj96j0758|fyefj96j0791|fyefj96j0814|fyefj96j0836|fyefj96j0867|fyefj96j0931|fyefj96j0965|fyefj96j0966|fyefj96j0976|fyefj96j1010|fyefj96j1021|fyefj96j1051|fyefj96j1055|fyefj96j1064|fyefj96j1068|fyefj96j1070|fyefj96j1077|fyefj96j1079|fyefj96j1081|fyefj96j1086|fyefj96j1088|fyefj96j1091|fyefj96j1093|fyefj96j1094|fyefj97j0001|fyefj97j0106|fyefj97j0584|fyefj97j0586|fyefj97j0671|fyefj97j0678|fyefj97j0758|fyefj97j0791|fyefj97j0814|fyefj97j0825|fyefj97j0836|fyefj97j0863|fyefj97j0865|fyefj97j0867|fyefj97j0914|fyefj97j0931|fyefj97j0952|fyefj97j0965|fyefj97j0966|fyefj97j0969|fyefj97j0971|fyefj97j0972|fyefj97j0976|fyefj97j0985|fyefj97j1010|fyefj97j1021|fyefj97j1051|fyefj97j1052|fyefj97j1055|fyefj97j1058|fyefj97j1059|fyefj97j1064|fyefj97j1068|fyefj97j1077|fyefj97j1079|fyefj97j1081|fyefj97j1086|fyefj97j1088|fyefj97j1095|fyefj98j0001|fyefj98j0243|fyefj98j0326|fyefj98j0329|fyefj98j0343|fyefj98j0344|fyefj98j0380|fyefj98j0472|fyefj98j0584|fyefj98j0586|fyefj98j0604|fyefj98j0671|fyefj98j0673|fyefj98j0676|fyefj98j0677|fyefj98j0678|fyefj98j0694|fyefj98j0758|fyefj98j0814|fyefj98j0825|fyefj98j0836|fyefj98j0863|fyefj98j0865|fyefj98j0867|fyefj98j0896|fyefj98j0898|fyefj98j0901|fyefj98j0906|fyefj98j0910|fyefj98j0913|fyefj98j0914|fyefj98j0922|fyefj98j0931|fyefj98j0934|fyefj98j0936|fyefj98j0951|fyefj98j0952|fyefj98j0963|fyefj98j0965|fyefj98j0966|fyefj98j0969|fyefj98j0971|fyefj98j0972|fyefj98j0974|fyefj98j0975|fyefj98j0976|fyefj98j0977|fyefj98j0978|fyefj98j0985|fyefj98j0992|fyefj98j1008|fyefj98j1009|fyefj98j1010|fyefj98j1011|fyefj98j1012|fyefj98j1019|fyefj98j1021|fyefj98j1028|fyefj98j1034|fyefj98j1039|fyefj98j1046|fyefj98j1047|fyefj98j1048|fyefj98j1054|fyefj98j1055|fyefj98j1064|fyefj98j1068|fyefj98j1077|fyefj98j1079|fyefj98j1080|fyefj98j1081|fyefj98j1082|fyefj98j1084|fyefj98j1087|fyefj98j1088|fyefj98j1090|fyefj99j0010|fyefj99j0188|fyefj99j0243|fyefj99j0268|fyefj99j0280|fyefj99j0301|fyefj99j0329|fyefj99j0343|fyefj99j0344|fyefj99j0380|fyefj99j0552|fyefj99j0573|fyefj99j0584|fyefj99j0586|fyefj99j0604|fyefj99j0671|fyefj99j0673|fyefj99j0676|fyefj99j0677|fyefj99j0678|fyefj99j0694|fyefj99j0722|fyefj99j0757|fyefj99j0758|fyefj99j0771|fyefj99j0772|fyefj99j0804|fyefj99j0806|fyefj99j0809|fyefj99j0814|fyefj99j0825|fyefj99j0836|fyefj99j0862|fyefj99j0863|fyefj99j0865|fyefj99j0866|fyefj99j0867|fyefj99j0875|fyefj99j0896|f
yefj99j0898|fyefj99j0901|fyefj99j0906|fyefj99j0907|fyefj99j0908|fyefj99j0910|fyefj99j0912|fyefj99j0913|fyefj99j0914|fyefj99j0921|fyefj99j0922|fyefj99j0923|fyefj99j0931|fyefj99j0934|fyefj99j0936|fyefj99j0937|fyefj99j0949|fyefj99j0951|fyefj99j0952|fyefj99j0962|fyefj99j0963|fyefj99j0965|fyefj99j0966|fyefj99j0969|fyefj99j0971|fyefj99j0972|fyefj99j0974|fyefj99j0975|fyefj99j0976|fyefj99j0977|fyefj99j0978|fyefj99j0982|fyefj99j0985|fyefj99j0986|fyefj99j0988|fyefj99j0991|fyefj99j0992|fyefj99j0995|fyefj99j0997|fyefj99j0999|fyefj99j1003|fyefj99j1006|fyefj99j1008|fyefj99j1009|fyefj99j1010|fyefj99j1011|fyefj99j1016|fyefj99j1019|fyefj99j1020|fyefj99j1021|fyefj99j1024|fyefj99j1026|fyefj99j1028|fyefj99j1031|fyefj99j1033|fyefj99j1034|fyefj99j1036|fyefj99j1039|fyefj99j1042|fyefj99j1045|fyefj99j1046|fyefj99j1048|fyefj99j1053|fyefj99j1054|fyefj99j1055|fyefj99j1061|fyefj99j1062|fyefj99j1063|fyefj99j1064|fyefj99j1068|fyefj99j1072|fyefj99j1076|fyefj99j1077|fyefj99j1079|fyefj99j1080|fyefj99j1081|fyefj99j1083|fyefj99j1084|fyefj99j1087|fyefj99j1088|fyefm00j0113|fyefm01j0057|fyefm01j0088|fyefm01j0091|fyefm01j0101|fyefm01j0104|fyefm01j0107|fyefm01j0112|fyefm01j0379|fyefm02j0057|fyefm02j0101|fyefm02j0104|fyefm02j0107|fyefm02j0112|fyefm02j0379|fyefm98j0066|fyefm99j0066|fyefm99j0090|fyefm99j0093|fyefm99j0110|fyefm99j0165|fyefm99j0208|fyefm99j0209|fyefm99j0295|fyefm99j0401|fyefm99j0402|fyefm99j0907|fyefm99j1054|fyefn98j0015|fyefn98j0024|fyefn98j0030|fyefn99j0015|fyefn99j0024|fyefn99j0030|fyefr94j0559|fyefr95j0559|fyefr96j0559|fyefr97j0559|fyefr98j0559|fyefr99j0012|fyefr99j0559|fyefb01305|fyeff00j0170|fyeff00j0224|fyeff00j0227|fyeff00j0228|fyeff00j0229|fyeff00j0280|fyeff00j0281|fyeff00j0282|fyeff00j0283|fyeff00j0288|fyeff00j0289|fyeff00j0331|fyeff00j0332|fyeff00j0333|fyeff00j0334|fyeff00j0335|fyeff00j0336|fyeff00j0337|fyeff00j0338|fyeff00j0346|fyeff00j0347|fyeff00j0348|fyeff00j0349|fyeff00j0350|fyeff00j0351|fyeff00j0357|fyeff00j0358|fyeff00j0371|fyeff00j0372|fyeff00j0396|fyeff00j0397|fyeff00j0424|fyeff00j0425|fyeff01j0416|fyeff02j0416|fyeff78j0418|fyeff79j0418|fyeff79j1051|fyeff80j1051|fyeff81j1051|fyeff82j1051|fyeff83j1051|fyeff84j1051|fyeff85j1051|fyeff86j1051|fyeff87j1051|fyeff88j0422|fyeff89j0422|fyeff90j0422|fyeff90j0434|fyeff90j0440|fyeff91j0422|fyeff91j0434|fyeff91j0440|fyeff92j0440|fyeff93j0440|fyeff93j1045|fyeff93j1067|fyeff94j0392|fyeff94j0440|fyeff94j0443|fyeff94j1045|fyeff94j1067|fyeff95j0219|fyeff95j0392|fyeff95j0439|fyeff95j0440|fyeff95j0443|fyeff96j0053|fyeff96j0219|fyeff96j0392|fyeff96j0429|fyeff96j0434|fyeff96j0950|fyeff96j1019|fyeff96j1028|fyeff97j0053|fyeff97j0178|fyeff97j0191|fyeff97j0219|fyeff97j0221|fyeff97j0258|fyeff97j0324|fyeff97j0355|fyeff97j0370|fyeff97j0377|fyeff97j0392|fyeff97j0429|fyeff97j0434|fyeff97j0950|fyeff97j1019|fyeff98j0053|fyeff98j0065|fyeff98j0101|fyeff98j0144|fyeff98j0156|fyeff98j0178|fyeff98j0191|fyeff98j0193|fyeff98j0196|fyeff98j0197|fyeff98j0209|fyeff98j0210|fyeff98j0211|fyeff98j0214|fyeff98j0215|fyeff98j0218|fyeff98j0219|fyeff98j0221|fyeff98j0258|fyeff98j0260|fyeff98j0279|fyeff98j0284|fyeff98j0295|fyeff98j0296|fyeff98j0298|fyeff98j0324|fyeff98j0355|fyeff98j0370|fyeff98j0376|fyeff98j0379|fyeff98j0381|fyeff98j0392|fyeff98j0401|fyeff98j0404|fyeff98j0405|fyeff98j0407|fyeff98j0411|fyeff98j0418|fyeff98j0421|fyeff98j0423|fyeff98j0433|fyeff98j0436|fyeff98j0673|fyeff98j0896|fyeff98j0950|fyeff98j0985|fyeff98j1012|fyeff99j0053|fyeff99j0065|fyeff99j0152|fyeff99j0156|fyeff99j0159|fyeff99j0178|fyeff99j0191|fyeff99j0193|fyeff99j0196|fyeff99j0197|fyeff99j0209|fyeff99j0210|fyeff99j0211|fyeff99j0
214|fyeff99j0215|fyeff99j0218|fyeff99j0219|fyeff99j0220|fyeff99j0221|fyeff99j0260|fyeff99j0279|fyeff99j0284|fyeff99j0291|fyeff99j0295|fyeff99j0296|fyeff99j0297|fyeff99j0298|fyeff99j0324|fyeff99j0339|fyeff99j0355|fyeff99j0370|fyeff99j0376|fyeff99j0379|fyeff99j0381|fyeff99j0392|fyeff99j0401|fyeff99j0404|fyeff99j0405|fyeff99j0407|fyeff99j0410|fyeff99j0411|fyeff99j0413|fyeff99j0414|fyeff99j0415|fyeff99j0418|fyeff99j0421|fyeff99j0423|fyeff99j0436|fyeff99j0673|fyeff99j0896|fyeff99j0950|fyeff99j0962|fyeff99j0985|fyeff99j1010|fyeff99j1012|fyeff99j1028|fyeff99j1090|fyeff99j1370|fayfm01j0148|fayfm01j0149|fayfm01j0155|fayfm02j0148|fayfm02j0149|fayfm02j0155|faefj00j0594|faefj00j0595|faefj00j0596|faefj00j0597|faefj01j0707|faefj02j0707|faefj03j0707|faefj90j1023|faefj91j1023|faefj92j1023|faefj94j1056|faefj95j1023|faefj95j1056|faefj96j1056|faefj98j1038|faefj99j1078|fdeff99j9001|fdeff99j9002|gyefj99j0005", // A long case insensitive alternation. "(?i:(zQPbMkNO|NNSPdvMi|iWuuSoAl|qbvKMimS|IecrXtPa|seTckYqt|NxnyHkgB|fIDlOgKb|UhlWIygH|OtNoJxHG|cUTkFVIV|mTgFIHjr|jQkoIDtE|PPMKxRXl|AwMfwVkQ|CQyMrTQJ|BzrqxVSi|nTpcWuhF|PertdywG|ZZDgCtXN|WWdDPyyE|uVtNQsKk|BdeCHvPZ|wshRnFlH|aOUIitIp|RxZeCdXT|CFZMslCj|AVBZRDxl|IzIGCnhw|ythYuWiz|oztXVXhl|VbLkwqQx|qvaUgyVC|VawUjPWC|ecloYJuj|boCLTdSU|uPrKeAZx|hrMWLWBq|JOnUNHRM|rYnujkPq|dDEdZhIj|DRrfvugG|yEGfDxVV|YMYdJWuP|PHUQZNWM|AmKNrLis|zTxndVfn|FPsHoJnc|EIulZTua|KlAPhdzg|ScHJJCLt|NtTfMzME|eMCwuFdo|SEpJVJbR|cdhXZeCx|sAVtBwRh|kVFEVcMI|jzJrxraA|tGLHTell|NNWoeSaw|DcOKSetX|UXZAJyka|THpMphDP|rizheevl|kDCBRidd|pCZZRqyu|pSygkitl|SwZGkAaW|wILOrfNX|QkwVOerj|kHOMxPDr|EwOVycJv|AJvtzQFS|yEOjKYYB|LizIINLL|JBRSsfcG|YPiUqqNl|IsdEbvee|MjEpGcBm|OxXZVgEQ|xClXGuxa|UzRCGFEb|buJbvfvA|IPZQxRet|oFYShsMc|oBHffuHO|bzzKrcBR|KAjzrGCl|IPUsAVls|OGMUMbIU|gyDccHuR|bjlalnDd|ZLWjeMna|fdsuIlxQ|dVXtiomV|XxedTjNg|XWMHlNoA|nnyqArQX|opfkWGhb|wYtnhdYb))", + "(?i:(AAAAAAAAAAAAAAAAAAAAAAAA|BBBBBBBBBBBBBBBBBBBBBBBB|cccccccccccccccccccccccC|ſſſſſſſſſſſſſſſſſſſſſſſſS|SSSSSSSSSSSSSSSSSSSSSSSSſ))", // A long case insensitive alternation where each entry ends with ".*". "(?i:(zQPbMkNO.*|NNSPdvMi.*|iWuuSoAl.*|qbvKMimS.*|IecrXtPa.*|seTckYqt.*|NxnyHkgB.*|fIDlOgKb.*|UhlWIygH.*|OtNoJxHG.*|cUTkFVIV.*|mTgFIHjr.*|jQkoIDtE.*|PPMKxRXl.*|AwMfwVkQ.*|CQyMrTQJ.*|BzrqxVSi.*|nTpcWuhF.*|PertdywG.*|ZZDgCtXN.*|WWdDPyyE.*|uVtNQsKk.*|BdeCHvPZ.*|wshRnFlH.*|aOUIitIp.*|RxZeCdXT.*|CFZMslCj.*|AVBZRDxl.*|IzIGCnhw.*|ythYuWiz.*|oztXVXhl.*|VbLkwqQx.*|qvaUgyVC.*|VawUjPWC.*|ecloYJuj.*|boCLTdSU.*|uPrKeAZx.*|hrMWLWBq.*|JOnUNHRM.*|rYnujkPq.*|dDEdZhIj.*|DRrfvugG.*|yEGfDxVV.*|YMYdJWuP.*|PHUQZNWM.*|AmKNrLis.*|zTxndVfn.*|FPsHoJnc.*|EIulZTua.*|KlAPhdzg.*|ScHJJCLt.*|NtTfMzME.*|eMCwuFdo.*|SEpJVJbR.*|cdhXZeCx.*|sAVtBwRh.*|kVFEVcMI.*|jzJrxraA.*|tGLHTell.*|NNWoeSaw.*|DcOKSetX.*|UXZAJyka.*|THpMphDP.*|rizheevl.*|kDCBRidd.*|pCZZRqyu.*|pSygkitl.*|SwZGkAaW.*|wILOrfNX.*|QkwVOerj.*|kHOMxPDr.*|EwOVycJv.*|AJvtzQFS.*|yEOjKYYB.*|LizIINLL.*|JBRSsfcG.*|YPiUqqNl.*|IsdEbvee.*|MjEpGcBm.*|OxXZVgEQ.*|xClXGuxa.*|UzRCGFEb.*|buJbvfvA.*|IPZQxRet.*|oFYShsMc.*|oBHffuHO.*|bzzKrcBR.*|KAjzrGCl.*|IPUsAVls.*|OGMUMbIU.*|gyDccHuR.*|bjlalnDd.*|ZLWjeMna.*|fdsuIlxQ.*|dVXtiomV.*|XxedTjNg.*|XWMHlNoA.*|nnyqArQX.*|opfkWGhb.*|wYtnhdYb.*))", // A long case insensitive alternation where each entry starts with ".*". @@ -81,6 +82,7 @@ var ( ".*foo.?", ".?foo.+", "foo.?|bar", + "ſſs", // Concat of literals and wildcards. 
".*-.*-.*-.*-.*", "(.+)-(.+)-(.+)-(.+)-(.+)", @@ -91,7 +93,7 @@ var ( "foo", " foo bar", "bar", "buzz\nbar", "bar foo", "bfoo", "\n", "\nfoo", "foo\n", "hello foo world", "hello foo\n world", "", "FOO", "Foo", "fOo", "foO", "OO", "Oo", "\nfoo\n", strings.Repeat("f", 20), "prometheus", "prometheus_api_v1", "prometheus_api_v1_foo", "10.0.1.20", "10.0.2.10", "10.0.3.30", "10.0.4.40", - "foofoo0", "foofoo", "😀foo0", + "foofoo0", "foofoo", "😀foo0", "ſſs", "ſſS", "AAAAAAAAAAAAAAAAAAAAAAAA", "BBBBBBBBBBBBBBBBBBBBBBBB", "cccccccccccccccccccccccC", "ſſſſſſſſſſſſſſſſſſſſſſſſS", "SSSSSSSSSSSSSSSSSSSSSSSSſ", // Values matching / not matching the test regexps on long alternations. "zQPbMkNO", "zQPbMkNo", "jyyfj00j0061", "jyyfj00j006", "jyyfj00j00612", "NNSPdvMi", "NNSPdvMiXXX", "NNSPdvMixxx", "nnSPdvMi", "nnSPdvMiXXX", @@ -293,6 +295,52 @@ func BenchmarkFastRegexMatcher(b *testing.B) { } } +func BenchmarkToNormalizedLower(b *testing.B) { + benchCase := func(l int, uppercase string, asciiOnly bool, alt int) string { + chars := "abcdefghijklmnopqrstuvwxyz" + if !asciiOnly { + chars = "aаbбcвdгeдfеgёhжiзjиkйlкmлnмoнpоqпrрsсtтuуvфwхxцyчzш" + } + // Swap the alphabet to make alternatives. + chars = chars[alt%len(chars):] + chars[:alt%len(chars)] + + str := strings.Repeat(chars, l/len(chars)+1)[:l] + switch uppercase { + case "first": + return strings.ToUpper(str[:1]) + str[1:] + case "last": + return str[:len(str)-1] + strings.ToUpper(str[len(str)-1:]) + case "all": + return strings.ToUpper(str) + case "none": + return str + default: + panic("invalid uppercase") + } + } + + for _, l := range []int{10, 100, 1000, 4000} { + b.Run(fmt.Sprintf("length=%d", l), func(b *testing.B) { + for _, uppercase := range []string{"none", "first", "last", "all"} { + b.Run("uppercase="+uppercase, func(b *testing.B) { + for _, asciiOnly := range []bool{true, false} { + b.Run(fmt.Sprintf("ascii=%t", asciiOnly), func(b *testing.B) { + inputs := make([]string, 10) + for i := range inputs { + inputs[i] = benchCase(l, uppercase, asciiOnly, i) + } + b.ResetTimer() + for n := 0; n < b.N; n++ { + toNormalisedLower(inputs[n%len(inputs)]) + } + }) + } + }) + } + }) + } +} + func TestStringMatcherFromRegexp(t *testing.T) { for _, c := range []struct { pattern string @@ -1157,3 +1205,16 @@ func visitStringMatcher(matcher StringMatcher, callback func(matcher StringMatch case trueMatcher: } } + +func TestToNormalisedLower(t *testing.T) { + testCases := map[string]string{ + "foo": "foo", + "AAAAAAAAAAAAAAAAAAAAAAAA": "aaaaaaaaaaaaaaaaaaaaaaaa", + "cccccccccccccccccccccccC": "cccccccccccccccccccccccc", + "ſſſſſſſſſſſſſſſſſſſſſſſſS": "sssssssssssssssssssssssss", + "ſſAſſa": "ssassa", + } + for input, expectedOutput := range testCases { + require.Equal(t, expectedOutput, toNormalisedLower(input)) + } +} From ea2b39a31e7b091d4ad25f1c6bcbfea57e37bd67 Mon Sep 17 00:00:00 2001 From: SuperQ Date: Mon, 10 Jun 2024 09:34:55 +0200 Subject: [PATCH 17/44] Tune default GOGC Adjust the default GOGC value to 75. This is less of a memory savings, but has less impact on CPU use. Signed-off-by: SuperQ --- config/config.go | 2 +- docs/configuration/configuration.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/config/config.go b/config/config.go index d02081d2e1..9defa10d48 100644 --- a/config/config.go +++ b/config/config.go @@ -154,7 +154,7 @@ var ( DefaultRuntimeConfig = RuntimeConfig{ // Go runtime tuning. - GoGC: 50, + GoGC: 75, } // DefaultScrapeConfig is the default scrape configuration. 
diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 26c088e135..f0e13cf136 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -125,7 +125,7 @@ runtime: # Configure the Go garbage collector GOGC parameter # See: https://tip.golang.org/doc/gc-guide#GOGC # Lowering this number increases CPU usage. - [ gogc: | default = 50 ] + [ gogc: | default = 75 ] # Rule files specifies a list of globs. Rules and alerts are read from # all matching files. From 6ccee2c4a537f0ebcab79522710635e987c86282 Mon Sep 17 00:00:00 2001 From: SuperQ Date: Mon, 10 Jun 2024 09:34:55 +0200 Subject: [PATCH 18/44] Tune default GOGC Adjust the default GOGC value to 75. This is less of a memory savings, but has less impact on CPU use. Signed-off-by: SuperQ --- config/config.go | 2 +- docs/configuration/configuration.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/config/config.go b/config/config.go index d02081d2e1..9defa10d48 100644 --- a/config/config.go +++ b/config/config.go @@ -154,7 +154,7 @@ var ( DefaultRuntimeConfig = RuntimeConfig{ // Go runtime tuning. - GoGC: 50, + GoGC: 75, } // DefaultScrapeConfig is the default scrape configuration. diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 26c088e135..f0e13cf136 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -125,7 +125,7 @@ runtime: # Configure the Go garbage collector GOGC parameter # See: https://tip.golang.org/doc/gc-guide#GOGC # Lowering this number increases CPU usage. - [ gogc: | default = 50 ] + [ gogc: | default = 75 ] # Rule files specifies a list of globs. Rules and alerts are read from # all matching files. From 38bf349ff774d45708a994519b1636d47bcbf721 Mon Sep 17 00:00:00 2001 From: SuperQ Date: Tue, 11 Jun 2024 11:18:13 +0200 Subject: [PATCH 19/44] Update changelog for GOGC tuning Include #14285 in changelog. Signed-off-by: SuperQ --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0bc3abd71e..e902a775f2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,10 @@ ## unreleased +This release changes the default for GOGC, the Go runtime control for the trade-off between excess memory use and CPU usage. We have found that Prometheus operates with minimal additional CPU usage, but greatly reduced memory by adjusting the upstream Go default from 100 to 75. + +* [CHANGE] Runtime: Change GOGC threshold from 50 to 75 #14285 + ## 2.53.0-rc.0 / 2024-06-06 This release changes the default for GOGC, the Go runtime control for the trade-off between excess memory use and CPU usage. We have found that Prometheus operates with minimal additional CPU usage, but greatly reduced memory by adjusting the upstream Go default from 100 to 50. From 4cfec57606dd8c407f8524f4a8913a80a600cb9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Fri, 7 Jun 2024 10:46:13 +0200 Subject: [PATCH 20/44] Revert "Update changelog due to pr 14273" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit dd4400146521c996239da57d2a225a608e3915cb. 
Signed-off-by: György Krajcsovits --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e902a775f2..34820be452 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,7 +12,7 @@ This release changes the default for GOGC, the Go runtime control for the trade- * [CHANGE] Rules: Execute 1 query instead of N (where N is the number of alerts within alert rule) when restoring alerts. #13980 #14048 * [CHANGE] Runtime: Change GOGC threshold from 100 to 50 #14176 -* [FEATURE] Rules: Add new option `query_offset` for each rule group via rule group configuration file and `rule_query_offset` as part of the global configuration to have more resilience for remote write delays. #14061 #14216 #14273 +* [FEATURE] Rules: Add new option `query_offset` for each rule group via rule group configuration file and `rule_query_offset` as part of the global configuration to have more resilience for remote write delays. #14061 #14216 * [ENHANCEMENT] Rules: Add `rule_group_last_restore_duration_seconds` metric to measure the time it takes to restore a rule group. #13974 * [ENHANCEMENT] OTLP: Improve remote write format translation performance by using label set hashes for metric identifiers instead of string based ones. #14006 #13991 * [ENHANCEMENT] TSDB: Optimize querying with regexp matchers. #13620 From dd8676218b83cc07c046ce8d221c273f580d7c0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Tue, 11 Jun 2024 12:55:26 +0200 Subject: [PATCH 21/44] Prepare 2.53.0-rc.1 release MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- CHANGELOG.md | 3 +++ VERSION | 2 +- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/package.json | 2 +- web/ui/react-app/package.json | 4 ++-- 7 files changed, 17 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 34820be452..0c6da426a6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,9 +2,12 @@ ## unreleased +## 2.53.0-rc.1 / 2024-06-11 + This release changes the default for GOGC, the Go runtime control for the trade-off between excess memory use and CPU usage. We have found that Prometheus operates with minimal additional CPU usage, but greatly reduced memory by adjusting the upstream Go default from 100 to 75. * [CHANGE] Runtime: Change GOGC threshold from 50 to 75 #14285 +* [BUGFIX] Rules: Fix Group.Equals() to take in account the new queryOffset too. Followup to #14061. 
#14273 ## 2.53.0-rc.0 / 2024-06-06 diff --git a/VERSION b/VERSION index ae392bf33c..8d108ef311 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.53.0-rc.0 +2.53.0-rc.1 diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 152abc8c7b..5039e8bd26 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.53.0-rc.0", + "version": "0.53.0-rc.1", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.53.0-rc.0", + "@prometheus-io/lezer-promql": "0.53.0-rc.1", "lru-cache": "^7.18.3" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 93486b8dec..6b155d00e0 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.53.0-rc.0", + "version": "0.53.0-rc.1", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index d002109ddd..e1610c1152 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.53.0-rc.0", + "version": "0.53.0-rc.1", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.53.0-rc.0", + "version": "0.53.0-rc.1", "workspaces": [ "react-app", "module/*" @@ -30,10 +30,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.53.0-rc.0", + "version": "0.53.0-rc.1", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.53.0-rc.0", + "@prometheus-io/lezer-promql": "0.53.0-rc.1", "lru-cache": "^7.18.3" }, "devDependencies": { @@ -69,7 +69,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.53.0-rc.0", + "version": "0.53.0-rc.1", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.5.1", @@ -19233,7 +19233,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.53.0-rc.0", + "version": "0.53.0-rc.1", "dependencies": { "@codemirror/autocomplete": "^6.11.1", "@codemirror/commands": "^6.3.2", @@ -19251,7 +19251,7 @@ "@lezer/lr": "^1.3.14", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.53.0-rc.0", + "@prometheus-io/codemirror-promql": "0.53.0-rc.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.2", diff --git a/web/ui/package.json b/web/ui/package.json index 4c9ce03e4e..06f44fe221 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -28,5 +28,5 @@ "ts-jest": "^29.1.1", "typescript": "^4.9.5" }, - "version": "0.53.0-rc.0" + "version": "0.53.0-rc.1" } diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index d21cf3db5b..63b3d60efc 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.53.0-rc.0", + "version": "0.53.0-rc.1", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.11.1", @@ -19,7 +19,7 @@ "@lezer/lr": "^1.3.14", 
"@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.53.0-rc.0", + "@prometheus-io/codemirror-promql": "0.53.0-rc.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.2", From 64a9abb8be73a25e486382e7160773697b7182d8 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Tue, 11 Jun 2024 15:36:46 +0200 Subject: [PATCH 22/44] Change LabelValuesFor() to accept index.Postings (#14280) The only call we have to LabelValuesFor() has an index.Postings, and we expand it to pass to this method, which will iterate over the values. That's a waste of resources: we can iterate on the index.Postings directly. If there's any downstream implementation that has a slice of series, they can always do an index.ListPostings from them: doing that is cheaper than expanding an abstract index.Postings. Signed-off-by: Oleg Zaytsev --- tsdb/block.go | 10 +++++----- tsdb/head_read.go | 15 ++++++++++----- tsdb/index/index.go | 9 ++++++--- tsdb/ooo_head_read.go | 2 +- tsdb/querier.go | 11 +---------- tsdb/querier_test.go | 13 ++++++++----- 6 files changed, 31 insertions(+), 29 deletions(-) diff --git a/tsdb/block.go b/tsdb/block.go index 83b86a58d1..d2e7aa6fac 100644 --- a/tsdb/block.go +++ b/tsdb/block.go @@ -103,9 +103,9 @@ type IndexReader interface { // storage.ErrNotFound is returned as error. LabelValueFor(ctx context.Context, id storage.SeriesRef, label string) (string, error) - // LabelNamesFor returns all the label names for the series referred to by IDs. + // LabelNamesFor returns all the label names for the series referred to by the postings. // The names returned are sorted. - LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) + LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) // Close releases the underlying resources of the reader. Close() error @@ -551,10 +551,10 @@ func (r blockIndexReader) LabelValueFor(ctx context.Context, id storage.SeriesRe return r.ir.LabelValueFor(ctx, id, label) } -// LabelNamesFor returns all the label names for the series referred to by IDs. +// LabelNamesFor returns all the label names for the series referred to by the postings. // The names returned are sorted. -func (r blockIndexReader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) { - return r.ir.LabelNamesFor(ctx, ids...) +func (r blockIndexReader) LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) { + return r.ir.LabelNamesFor(ctx, postings) } type blockTombstoneReader struct { diff --git a/tsdb/head_read.go b/tsdb/head_read.go index c53e10956b..689972f1b7 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -267,15 +267,17 @@ func (h *headIndexReader) LabelValueFor(_ context.Context, id storage.SeriesRef, return value, nil } -// LabelNamesFor returns all the label names for the series referred to by IDs. +// LabelNamesFor returns all the label names for the series referred to by the postings. // The names returned are sorted. 
-func (h *headIndexReader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) { +func (h *headIndexReader) LabelNamesFor(ctx context.Context, series index.Postings) ([]string, error) { namesMap := make(map[string]struct{}) - for _, id := range ids { - if ctx.Err() != nil { + i := 0 + for series.Next() { + i++ + if i%checkContextEveryNIterations == 0 && ctx.Err() != nil { return nil, ctx.Err() } - memSeries := h.head.series.getByID(chunks.HeadSeriesRef(id)) + memSeries := h.head.series.getByID(chunks.HeadSeriesRef(series.At())) if memSeries == nil { // Series not found, this happens during compaction, // when series was garbage collected after the caller got the series IDs. @@ -285,6 +287,9 @@ func (h *headIndexReader) LabelNamesFor(ctx context.Context, ids ...storage.Seri namesMap[lbl.Name] = struct{}{} }) } + if err := series.Err(); err != nil { + return nil, err + } names := make([]string, 0, len(namesMap)) for name := range namesMap { names = append(names, name) diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 480e6a8fc7..8172b81ce3 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -1551,11 +1551,14 @@ func (r *Reader) LabelValues(ctx context.Context, name string, matchers ...*labe // LabelNamesFor returns all the label names for the series referred to by IDs. // The names returned are sorted. -func (r *Reader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) { +func (r *Reader) LabelNamesFor(ctx context.Context, postings Postings) ([]string, error) { // Gather offsetsMap the name offsetsMap in the symbol table first offsetsMap := make(map[uint32]struct{}) - for _, id := range ids { - if ctx.Err() != nil { + i := 0 + for postings.Next() { + id := postings.At() + + if i%checkContextEveryNIterations == 0 && ctx.Err() != nil { return nil, ctx.Err() } diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index af431d678f..3b5adf80c9 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -483,7 +483,7 @@ func (ir *OOOCompactionHeadIndexReader) LabelValueFor(context.Context, storage.S return "", errors.New("not implemented") } -func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) { +func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) { return nil, errors.New("not implemented") } diff --git a/tsdb/querier.go b/tsdb/querier.go index 1071c4a716..fb4a87cc8c 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -447,16 +447,7 @@ func labelNamesWithMatchers(ctx context.Context, r IndexReader, matchers ...*lab if err != nil { return nil, err } - - var postings []storage.SeriesRef - for p.Next() { - postings = append(postings, p.At()) - } - if err := p.Err(); err != nil { - return nil, fmt.Errorf("postings for label names with matchers: %w", err) - } - - return r.LabelNamesFor(ctx, postings...) + return r.LabelNamesFor(ctx, p) } // seriesData, used inside other iterators, are updated when we move from one series to another. 
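As an illustrative sketch (not part of this patch), the adaptation the commit message suggests for downstream code that still holds a plain slice of series references could look like the following, assuming the exported index.NewListPostings helper and the post-patch IndexReader interface; the labelNamesForRefs name is hypothetical:

package example

import (
	"context"

	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/index"
)

// labelNamesForRefs adapts a slice of series references to the new
// LabelNamesFor(ctx, index.Postings) signature by wrapping the slice in a
// ListPostings iterator, which is cheaper than expanding an abstract
// index.Postings into a slice first.
func labelNamesForRefs(ctx context.Context, r tsdb.IndexReader, refs []storage.SeriesRef) ([]string, error) {
	return r.LabelNamesFor(ctx, index.NewListPostings(refs))
}
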
diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index c7e60a0e16..a1af49465d 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -2292,13 +2292,16 @@ func (m mockIndex) LabelValueFor(_ context.Context, id storage.SeriesRef, label return m.series[id].l.Get(label), nil } -func (m mockIndex) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) { +func (m mockIndex) LabelNamesFor(_ context.Context, postings index.Postings) ([]string, error) { namesMap := make(map[string]bool) - for _, id := range ids { - m.series[id].l.Range(func(lbl labels.Label) { + for postings.Next() { + m.series[postings.At()].l.Range(func(lbl labels.Label) { namesMap[lbl.Name] = true }) } + if err := postings.Err(); err != nil { + return nil, err + } names := make([]string, 0, len(namesMap)) for name := range namesMap { names = append(names, name) @@ -3232,7 +3235,7 @@ func (m mockMatcherIndex) LabelValueFor(context.Context, storage.SeriesRef, stri return "", errors.New("label value for called") } -func (m mockMatcherIndex) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) { +func (m mockMatcherIndex) LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) { return nil, errors.New("label names for for called") } @@ -3693,7 +3696,7 @@ func (m mockReaderOfLabels) LabelNames(context.Context, ...*labels.Matcher) ([]s panic("LabelNames called") } -func (m mockReaderOfLabels) LabelNamesFor(context.Context, ...storage.SeriesRef) ([]string, error) { +func (m mockReaderOfLabels) LabelNamesFor(context.Context, index.Postings) ([]string, error) { panic("LabelNamesFor called") } From 05380aa0ac3d2df2c549d4d4c863d850da7a9886 Mon Sep 17 00:00:00 2001 From: Sebastian Rabenhorst <4246554+rabenhorst@users.noreply.github.com> Date: Wed, 12 Jun 2024 16:07:42 +0200 Subject: [PATCH 23/44] agent db: make rejecting ooo samples configurable (#14094) feat: Make OOO ingestion time window configurable for Prometheus Agent. Signed-off-by: Sebastian Rabenhorst --- cmd/prometheus/main.go | 24 ++++++++----- docs/configuration/configuration.md | 4 +++ tsdb/agent/db.go | 38 ++++++++++++++++---- tsdb/agent/db_test.go | 55 ++++++++++++++++++++++++++++- 4 files changed, 104 insertions(+), 17 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 8db2f2c5eb..cd7f533d1c 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -1197,7 +1197,7 @@ func main() { } if agentMode { // WAL storage. 
- opts := cfg.agent.ToAgentOptions() + opts := cfg.agent.ToAgentOptions(cfg.tsdb.OutOfOrderTimeWindow) cancel := make(chan struct{}) g.Add( func() error { @@ -1233,6 +1233,7 @@ func main() { "TruncateFrequency", cfg.agent.TruncateFrequency, "MinWALTime", cfg.agent.MinWALTime, "MaxWALTime", cfg.agent.MaxWALTime, + "OutOfOrderTimeWindow", cfg.agent.OutOfOrderTimeWindow, ) localStorage.Set(db, 0) @@ -1736,17 +1737,22 @@ type agentOptions struct { TruncateFrequency model.Duration MinWALTime, MaxWALTime model.Duration NoLockfile bool + OutOfOrderTimeWindow int64 } -func (opts agentOptions) ToAgentOptions() agent.Options { +func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Options { + if outOfOrderTimeWindow < 0 { + outOfOrderTimeWindow = 0 + } return agent.Options{ - WALSegmentSize: int(opts.WALSegmentSize), - WALCompression: wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType), - StripeSize: opts.StripeSize, - TruncateFrequency: time.Duration(opts.TruncateFrequency), - MinWALTime: durationToInt64Millis(time.Duration(opts.MinWALTime)), - MaxWALTime: durationToInt64Millis(time.Duration(opts.MaxWALTime)), - NoLockfile: opts.NoLockfile, + WALSegmentSize: int(opts.WALSegmentSize), + WALCompression: wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType), + StripeSize: opts.StripeSize, + TruncateFrequency: time.Duration(opts.TruncateFrequency), + MinWALTime: durationToInt64Millis(time.Duration(opts.MinWALTime)), + MaxWALTime: durationToInt64Millis(time.Duration(opts.MaxWALTime)), + NoLockfile: opts.NoLockfile, + OutOfOrderTimeWindow: outOfOrderTimeWindow, } } diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index f0e13cf136..b83219700a 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -3813,6 +3813,10 @@ NOTE: Out-of-order ingestion is an experimental feature, but you do not need any # into the TSDB, i.e. it is an in-order sample or an out-of-order/out-of-bounds sample # that is within the out-of-order window, or (b) too-old, i.e. not in-order # and before the out-of-order window. +# +# When out_of_order_time_window is greater than 0, it also affects experimental agent. It allows +# the agent's WAL to accept out-of-order samples that fall within the specified time window relative +# to the timestamp of the last appended sample for the same series. [ out_of_order_time_window: | default = 0s ] ``` diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 513c2ed5a3..1b6df3af0f 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -81,19 +81,23 @@ type Options struct { // NoLockfile disables creation and consideration of a lock file. NoLockfile bool + + // OutOfOrderTimeWindow specifies how much out of order is allowed, if any. + OutOfOrderTimeWindow int64 } // DefaultOptions used for the WAL storage. They are reasonable for setups using // millisecond-precision timestamps. 
func DefaultOptions() *Options { return &Options{ - WALSegmentSize: wlog.DefaultSegmentSize, - WALCompression: wlog.CompressionNone, - StripeSize: tsdb.DefaultStripeSize, - TruncateFrequency: DefaultTruncateFrequency, - MinWALTime: DefaultMinWALTime, - MaxWALTime: DefaultMaxWALTime, - NoLockfile: false, + WALSegmentSize: wlog.DefaultSegmentSize, + WALCompression: wlog.CompressionNone, + StripeSize: tsdb.DefaultStripeSize, + TruncateFrequency: DefaultTruncateFrequency, + MinWALTime: DefaultMinWALTime, + MaxWALTime: DefaultMaxWALTime, + NoLockfile: false, + OutOfOrderTimeWindow: 0, } } @@ -812,6 +816,11 @@ func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v flo series.Lock() defer series.Unlock() + if t <= a.minValidTime(series.lastTs) { + a.metrics.totalOutOfOrderSamples.Inc() + return 0, storage.ErrOutOfOrderSample + } + // NOTE: always modify pendingSamples and sampleSeries together. a.pendingSamples = append(a.pendingSamples, record.RefSample{ Ref: series.ref, @@ -935,6 +944,11 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int series.Lock() defer series.Unlock() + if t <= a.minValidTime(series.lastTs) { + a.metrics.totalOutOfOrderSamples.Inc() + return 0, storage.ErrOutOfOrderSample + } + switch { case h != nil: // NOTE: always modify pendingHistograms and histogramSeries together @@ -1103,3 +1117,13 @@ func (a *appender) logSeries() error { return nil } + +// mintTs returns the minimum timestamp that a sample can have +// and is needed for preventing underflow. +func (a *appender) minValidTime(lastTs int64) int64 { + if lastTs < math.MinInt64+a.opts.OutOfOrderTimeWindow { + return math.MinInt64 + } + + return lastTs - a.opts.OutOfOrderTimeWindow +} diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index a7dae32208..b984e6bc09 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -16,6 +16,7 @@ package agent import ( "context" "fmt" + "math" "path/filepath" "strconv" "testing" @@ -761,7 +762,9 @@ func TestDBAllowOOOSamples(t *testing.T) { ) reg := prometheus.NewRegistry() - s := createTestAgentDB(t, reg, DefaultOptions()) + opts := DefaultOptions() + opts.OutOfOrderTimeWindow = math.MaxInt64 + s := createTestAgentDB(t, reg, opts) app := s.Appender(context.TODO()) // Let's add some samples in the [offset, offset+numDatapoints) range. @@ -879,6 +882,56 @@ func TestDBAllowOOOSamples(t *testing.T) { require.NoError(t, db.Close()) } +func TestDBOutOfOrderTimeWindow(t *testing.T) { + tc := []struct { + outOfOrderTimeWindow, firstTs, secondTs int64 + expectedError error + }{ + {0, 100, 101, nil}, + {0, 100, 100, storage.ErrOutOfOrderSample}, + {0, 100, 99, storage.ErrOutOfOrderSample}, + {100, 100, 1, nil}, + {100, 100, 0, storage.ErrOutOfOrderSample}, + } + + for _, c := range tc { + t.Run(fmt.Sprintf("outOfOrderTimeWindow=%d, firstTs=%d, secondTs=%d, expectedError=%s", c.outOfOrderTimeWindow, c.firstTs, c.secondTs, c.expectedError), func(t *testing.T) { + reg := prometheus.NewRegistry() + opts := DefaultOptions() + opts.OutOfOrderTimeWindow = c.outOfOrderTimeWindow + s := createTestAgentDB(t, reg, opts) + app := s.Appender(context.TODO()) + + lbls := labelsForTest(t.Name()+"_histogram", 1) + lset := labels.New(lbls[0]...) 
+ _, err := app.AppendHistogram(0, lset, c.firstTs, tsdbutil.GenerateTestHistograms(1)[0], nil) + require.NoError(t, err) + err = app.Commit() + require.NoError(t, err) + _, err = app.AppendHistogram(0, lset, c.secondTs, tsdbutil.GenerateTestHistograms(1)[0], nil) + require.ErrorIs(t, err, c.expectedError) + + lbls = labelsForTest(t.Name(), 1) + lset = labels.New(lbls[0]...) + _, err = app.Append(0, lset, c.firstTs, 0) + require.NoError(t, err) + err = app.Commit() + require.NoError(t, err) + _, err = app.Append(0, lset, c.secondTs, 0) + require.ErrorIs(t, err, c.expectedError) + + expectedAppendedSamples := float64(2) + if c.expectedError != nil { + expectedAppendedSamples = 1 + } + m := gatherFamily(t, reg, "prometheus_agent_samples_appended_total") + require.Equal(t, expectedAppendedSamples, m.Metric[0].Counter.GetValue(), "agent wal mismatch of total appended samples") + require.Equal(t, expectedAppendedSamples, m.Metric[1].Counter.GetValue(), "agent wal mismatch of total appended histograms") + require.NoError(t, s.Close()) + }) + } +} + func BenchmarkCreateSeries(b *testing.B) { s := createTestAgentDB(b, nil, DefaultOptions()) defer s.Close() From 5a218708f1f755a67b3d9b68549ccbbb6463d850 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Wed, 12 Jun 2024 14:31:25 -0700 Subject: [PATCH 24/44] tsdb: Extend compactor interface to allow compactions to create multiple output blocks (#14143) * add hook to allow head compaction to create multiple output blocks Signed-off-by: Ben Ye * change Compact interface; remove BlockPopulator changes Signed-off-by: Ben Ye * rebase main Signed-off-by: Ben Ye * fix lint Signed-off-by: Ben Ye * fix unit test Signed-off-by: Ben Ye * address feedbacks; add unit test Signed-off-by: Ben Ye * Apply suggestions from code review Signed-off-by: Ganesh Vernekar * Update tsdb/compact_test.go Signed-off-by: Ganesh Vernekar --------- Signed-off-by: Ben Ye Signed-off-by: Ganesh Vernekar Co-authored-by: Ganesh Vernekar --- cmd/promtool/backfill.go | 5 ++++ tsdb/block.go | 10 +++---- tsdb/block_test.go | 15 ++++++---- tsdb/blockwriter.go | 9 ++++-- tsdb/compact.go | 62 +++++++++++++++++++++------------------- tsdb/compact_test.go | 41 +++++++++++++++++++------- tsdb/db.go | 50 +++++++++++++++++--------------- tsdb/db_test.go | 48 ++++++++++++++++--------------- 8 files changed, 141 insertions(+), 99 deletions(-) diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go index 79db428c71..400cae421a 100644 --- a/cmd/promtool/backfill.go +++ b/cmd/promtool/backfill.go @@ -22,6 +22,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/oklog/ulid" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/textparse" @@ -191,6 +192,10 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn if quiet { break } + // Empty block, don't print. + if block.Compare(ulid.ULID{}) == 0 { + break + } blocks, err := db.Blocks() if err != nil { return fmt.Errorf("get blocks: %w", err) diff --git a/tsdb/block.go b/tsdb/block.go index d2e7aa6fac..2f32733f8c 100644 --- a/tsdb/block.go +++ b/tsdb/block.go @@ -646,10 +646,10 @@ Outer: } // CleanTombstones will remove the tombstones and rewrite the block (only if there are any tombstones). -// If there was a rewrite, then it returns the ULID of the new block written, else nil. -// If the resultant block is empty (tombstones covered the whole block), then it deletes the new block and return nil UID. +// If there was a rewrite, then it returns the ULID of new blocks written, else nil. 
+// If a resultant block is empty (tombstones covered the whole block), then it returns an empty slice. // It returns a boolean indicating if the parent block can be deleted safely of not. -func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, bool, error) { +func (pb *Block) CleanTombstones(dest string, c Compactor) ([]ulid.ULID, bool, error) { numStones := 0 if err := pb.tombstones.Iter(func(id storage.SeriesRef, ivs tombstones.Intervals) error { @@ -664,12 +664,12 @@ func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, bool, er } meta := pb.Meta() - uid, err := c.Write(dest, pb, pb.meta.MinTime, pb.meta.MaxTime, &meta) + uids, err := c.Write(dest, pb, pb.meta.MinTime, pb.meta.MaxTime, &meta) if err != nil { return nil, false, err } - return &uid, true, nil + return uids, true, nil } // Snapshot creates snapshot of the block into dir. diff --git a/tsdb/block_test.go b/tsdb/block_test.go index 42acc3c693..f2569e35be 100644 --- a/tsdb/block_test.go +++ b/tsdb/block_test.go @@ -346,9 +346,10 @@ func TestBlockSize(t *testing.T) { c, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{0}, nil, nil) require.NoError(t, err) - blockDirAfterCompact, err := c.Compact(tmpdir, []string{blockInit.Dir()}, nil) + blockDirsAfterCompact, err := c.Compact(tmpdir, []string{blockInit.Dir()}, nil) require.NoError(t, err) - blockAfterCompact, err := OpenBlock(nil, filepath.Join(tmpdir, blockDirAfterCompact.String()), nil) + require.Len(t, blockDirsAfterCompact, 1) + blockAfterCompact, err := OpenBlock(nil, filepath.Join(tmpdir, blockDirsAfterCompact[0].String()), nil) require.NoError(t, err) defer func() { require.NoError(t, blockAfterCompact.Close()) @@ -605,9 +606,10 @@ func createBlockFromHead(tb testing.TB, dir string, head *Head) string { // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). // Because of this block intervals are always +1 than the total samples it includes. - ulid, err := compactor.Write(dir, head, head.MinTime(), head.MaxTime()+1, nil) + ulids, err := compactor.Write(dir, head, head.MinTime(), head.MaxTime()+1, nil) require.NoError(tb, err) - return filepath.Join(dir, ulid.String()) + require.Len(tb, ulids, 1) + return filepath.Join(dir, ulids[0].String()) } func createBlockFromOOOHead(tb testing.TB, dir string, head *OOOCompactionHead) string { @@ -618,9 +620,10 @@ func createBlockFromOOOHead(tb testing.TB, dir string, head *OOOCompactionHead) // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). // Because of this block intervals are always +1 than the total samples it includes. 
- ulid, err := compactor.Write(dir, head, head.MinTime(), head.MaxTime()+1, nil) + ulids, err := compactor.Write(dir, head, head.MinTime(), head.MaxTime()+1, nil) require.NoError(tb, err) - return filepath.Join(dir, ulid.String()) + require.Len(tb, ulids, 1) + return filepath.Join(dir, ulids[0].String()) } func createHead(tb testing.TB, w *wlog.WL, series []storage.Series, chunkDir string) *Head { diff --git a/tsdb/blockwriter.go b/tsdb/blockwriter.go index 32346d69d0..232ec2b914 100644 --- a/tsdb/blockwriter.go +++ b/tsdb/blockwriter.go @@ -105,12 +105,17 @@ func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) { if err != nil { return ulid.ULID{}, fmt.Errorf("create leveled compactor: %w", err) } - id, err := compactor.Write(w.destinationDir, w.head, mint, maxt, nil) + ids, err := compactor.Write(w.destinationDir, w.head, mint, maxt, nil) if err != nil { return ulid.ULID{}, fmt.Errorf("compactor write: %w", err) } - return id, nil + // No block was produced. Caller is responsible to check empty + // ulid.ULID based on its use case. + if len(ids) == 0 { + return ulid.ULID{}, nil + } + return ids[0], nil } func (w *BlockWriter) Close() error { diff --git a/tsdb/compact.go b/tsdb/compact.go index c2ae23b2e4..3c921520f5 100644 --- a/tsdb/compact.go +++ b/tsdb/compact.go @@ -58,19 +58,23 @@ type Compactor interface { // Results returned when compactions are in progress are undefined. Plan(dir string) ([]string, error) - // Write persists a Block into a directory. - // No Block is written when resulting Block has 0 samples, and returns empty ulid.ULID{}. - Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) (ulid.ULID, error) + // Write persists one or more Blocks into a directory. + // No Block is written when resulting Block has 0 samples and returns an empty slice. + // Prometheus always return one or no block. The interface allows returning more than one + // block for downstream users to experiment with compactor. + Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) ([]ulid.ULID, error) // Compact runs compaction against the provided directories. Must // only be called concurrently with results of Plan(). // Can optionally pass a list of already open blocks, // to avoid having to reopen them. - // When resulting Block has 0 samples + // Prometheus always return one or no block. The interface allows returning more than one + // block for downstream users to experiment with compactor. + // When one resulting Block has 0 samples // * No block is written. // * The source dirs are marked Deletable. - // * Returns empty ulid.ULID{}. - Compact(dest string, dirs []string, open []*Block) (ulid.ULID, error) + // * Block is not included in the result. + Compact(dest string, dirs []string, open []*Block) ([]ulid.ULID, error) } // LeveledCompactor implements the Compactor interface. @@ -441,11 +445,11 @@ func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta { // Compact creates a new block in the compactor's directory from the blocks in the // provided directories. 
-func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (uid ulid.ULID, err error) { +func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) ([]ulid.ULID, error) { return c.CompactWithBlockPopulator(dest, dirs, open, DefaultBlockPopulator{}) } -func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, open []*Block, blockPopulator BlockPopulator) (uid ulid.ULID, err error) { +func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, open []*Block, blockPopulator BlockPopulator) ([]ulid.ULID, error) { var ( blocks []BlockReader bs []*Block @@ -457,7 +461,7 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, for _, d := range dirs { meta, _, err := readMetaFile(d) if err != nil { - return uid, err + return nil, err } var b *Block @@ -475,7 +479,7 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, var err error b, err = OpenBlock(c.logger, d, c.chunkPool) if err != nil { - return uid, err + return nil, err } defer b.Close() } @@ -486,10 +490,10 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, uids = append(uids, meta.ULID.String()) } - uid = ulid.MustNew(ulid.Now(), rand.Reader) + uid := ulid.MustNew(ulid.Now(), rand.Reader) meta := CompactBlockMetas(uid, metas...) - err = c.write(dest, meta, blockPopulator, blocks...) + err := c.write(dest, meta, blockPopulator, blocks...) if err == nil { if meta.Stats.NumSamples == 0 { for _, b := range bs { @@ -503,25 +507,25 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, } b.numBytesMeta = n } - uid = ulid.ULID{} level.Info(c.logger).Log( "msg", "compact blocks resulted in empty block", "count", len(blocks), "sources", fmt.Sprintf("%v", uids), "duration", time.Since(start), ) - } else { - level.Info(c.logger).Log( - "msg", "compact blocks", - "count", len(blocks), - "mint", meta.MinTime, - "maxt", meta.MaxTime, - "ulid", meta.ULID, - "sources", fmt.Sprintf("%v", uids), - "duration", time.Since(start), - ) + return nil, nil } - return uid, nil + + level.Info(c.logger).Log( + "msg", "compact blocks", + "count", len(blocks), + "mint", meta.MinTime, + "maxt", meta.MaxTime, + "ulid", meta.ULID, + "sources", fmt.Sprintf("%v", uids), + "duration", time.Since(start), + ) + return []ulid.ULID{uid}, nil } errs := tsdb_errors.NewMulti(err) @@ -533,10 +537,10 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, } } - return uid, errs.Err() + return nil, errs.Err() } -func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) (ulid.ULID, error) { +func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) ([]ulid.ULID, error) { start := time.Now() uid := ulid.MustNew(ulid.Now(), rand.Reader) @@ -560,7 +564,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b err := c.write(dest, meta, DefaultBlockPopulator{}, b) if err != nil { - return uid, err + return nil, err } if meta.Stats.NumSamples == 0 { @@ -570,7 +574,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b "maxt", meta.MaxTime, "duration", time.Since(start), ) - return ulid.ULID{}, nil + return nil, nil } level.Info(c.logger).Log( @@ -581,7 +585,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b "duration", time.Since(start), "ooo", meta.Compaction.FromOutOfOrder(), ) - return uid, 
nil + return []ulid.ULID{uid}, nil } // instrumentedChunkWriter is used for level 1 compactions to record statistics diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index 7a353a556a..5ce163f1ef 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -1484,12 +1484,12 @@ func TestHeadCompactionWithHistograms(t *testing.T) { maxt := head.MaxTime() + 1 // Block intervals are half-open: [b.MinTime, b.MaxTime). compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{DefaultBlockDuration}, chunkenc.NewPool(), nil) require.NoError(t, err) - id, err := compactor.Write(head.opts.ChunkDirRoot, head, mint, maxt, nil) + ids, err := compactor.Write(head.opts.ChunkDirRoot, head, mint, maxt, nil) require.NoError(t, err) - require.NotEqual(t, ulid.ULID{}, id) + require.Len(t, ids, 1) // Open the block and query it and check the histograms. - block, err := OpenBlock(nil, path.Join(head.opts.ChunkDirRoot, id.String()), nil) + block, err := OpenBlock(nil, path.Join(head.opts.ChunkDirRoot, ids[0].String()), nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, block.Close()) @@ -1598,8 +1598,8 @@ func TestSparseHistogramSpaceSavings(t *testing.T) { sparseApp := sparseHead.Appender(context.Background()) numOldSeriesPerHistogram := 0 - var oldULID ulid.ULID - var sparseULID ulid.ULID + var oldULIDs []ulid.ULID + var sparseULIDs []ulid.ULID var wg sync.WaitGroup @@ -1626,9 +1626,9 @@ func TestSparseHistogramSpaceSavings(t *testing.T) { maxt := sparseHead.MaxTime() + 1 // Block intervals are half-open: [b.MinTime, b.MaxTime). compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{DefaultBlockDuration}, chunkenc.NewPool(), nil) require.NoError(t, err) - sparseULID, err = compactor.Write(sparseHead.opts.ChunkDirRoot, sparseHead, mint, maxt, nil) + sparseULIDs, err = compactor.Write(sparseHead.opts.ChunkDirRoot, sparseHead, mint, maxt, nil) require.NoError(t, err) - require.NotEqual(t, ulid.ULID{}, sparseULID) + require.Len(t, sparseULIDs, 1) }() wg.Add(1) @@ -1677,15 +1677,15 @@ func TestSparseHistogramSpaceSavings(t *testing.T) { maxt := oldHead.MaxTime() + 1 // Block intervals are half-open: [b.MinTime, b.MaxTime). compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{DefaultBlockDuration}, chunkenc.NewPool(), nil) require.NoError(t, err) - oldULID, err = compactor.Write(oldHead.opts.ChunkDirRoot, oldHead, mint, maxt, nil) + oldULIDs, err = compactor.Write(oldHead.opts.ChunkDirRoot, oldHead, mint, maxt, nil) require.NoError(t, err) - require.NotEqual(t, ulid.ULID{}, oldULID) + require.Len(t, oldULIDs, 1) }() wg.Wait() - oldBlockDir := filepath.Join(oldHead.opts.ChunkDirRoot, oldULID.String()) - sparseBlockDir := filepath.Join(sparseHead.opts.ChunkDirRoot, sparseULID.String()) + oldBlockDir := filepath.Join(oldHead.opts.ChunkDirRoot, oldULIDs[0].String()) + sparseBlockDir := filepath.Join(sparseHead.opts.ChunkDirRoot, sparseULIDs[0].String()) oldSize, err := fileutil.DirSize(oldBlockDir) require.NoError(t, err) @@ -1846,3 +1846,22 @@ func TestCompactBlockMetas(t *testing.T) { } require.Equal(t, expected, output) } + +func TestCompactEmptyResultBlockWithTombstone(t *testing.T) { + ctx := context.Background() + tmpdir := t.TempDir() + blockDir := createBlock(t, tmpdir, genSeries(1, 1, 0, 10)) + block, err := OpenBlock(nil, blockDir, nil) + require.NoError(t, err) + // Write tombstone covering the whole block. 
+ err = block.Delete(ctx, 0, 10, labels.MustNewMatcher(labels.MatchEqual, defaultLabelName, "0")) + require.NoError(t, err) + + c, err := NewLeveledCompactor(ctx, nil, log.NewNopLogger(), []int64{0}, nil, nil) + require.NoError(t, err) + + ulids, err := c.Compact(tmpdir, []string{blockDir}, []*Block{block}) + require.NoError(t, err) + require.Nil(t, ulids) + require.NoError(t, block.Close()) +} diff --git a/tsdb/db.go b/tsdb/db.go index 5651b403e5..c44737c692 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -1336,13 +1336,11 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID for t := blockSize * (oooHeadMint / blockSize); t <= oooHeadMaxt; t += blockSize { mint, maxt := t, t+blockSize // Block intervals are half-open: [b.MinTime, b.MaxTime). Block intervals are always +1 than the total samples it includes. - uid, err := db.compactor.Write(dest, oooHead.CloneForTimeRange(mint, maxt-1), mint, maxt, meta) + uids, err := db.compactor.Write(dest, oooHead.CloneForTimeRange(mint, maxt-1), mint, maxt, meta) if err != nil { return nil, err } - if uid.Compare(ulid.ULID{}) != 0 { - ulids = append(ulids, uid) - } + ulids = append(ulids, uids...) } if len(ulids) == 0 { @@ -1364,19 +1362,19 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID // compactHead compacts the given RangeHead. // The compaction mutex should be held before calling this method. func (db *DB) compactHead(head *RangeHead) error { - uid, err := db.compactor.Write(db.dir, head, head.MinTime(), head.BlockMaxTime(), nil) + uids, err := db.compactor.Write(db.dir, head, head.MinTime(), head.BlockMaxTime(), nil) if err != nil { return fmt.Errorf("persist head block: %w", err) } if err := db.reloadBlocks(); err != nil { - if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil { - return tsdb_errors.NewMulti( - fmt.Errorf("reloadBlocks blocks: %w", err), - fmt.Errorf("delete persisted head block after failed db reloadBlocks:%s: %w", uid, errRemoveAll), - ).Err() + multiErr := tsdb_errors.NewMulti(fmt.Errorf("reloadBlocks blocks: %w", err)) + for _, uid := range uids { + if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil { + multiErr.Add(fmt.Errorf("delete persisted head block after failed db reloadBlocks:%s: %w", uid, errRemoveAll)) + } } - return fmt.Errorf("reloadBlocks blocks: %w", err) + return multiErr.Err() } if err = db.head.truncateMemory(head.BlockMaxTime()); err != nil { return fmt.Errorf("head memory truncate: %w", err) @@ -1411,16 +1409,19 @@ func (db *DB) compactBlocks() (err error) { default: } - uid, err := db.compactor.Compact(db.dir, plan, db.blocks) + uids, err := db.compactor.Compact(db.dir, plan, db.blocks) if err != nil { return fmt.Errorf("compact %s: %w", plan, err) } if err := db.reloadBlocks(); err != nil { - if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil { - return fmt.Errorf("delete compacted block after failed db reloadBlocks:%s: %w", uid, err) + errs := tsdb_errors.NewMulti(fmt.Errorf("reloadBlocks blocks: %w", err)) + for _, uid := range uids { + if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil { + errs.Add(fmt.Errorf("delete persisted block after failed db reloadBlocks:%s: %w", uid, errRemoveAll)) + } } - return fmt.Errorf("reloadBlocks blocks: %w", err) + return errs.Err() } } @@ -1541,12 +1542,15 @@ func (db *DB) reloadBlocks() (err error) { oldBlocks := db.blocks db.blocks = toLoad - blockMetas := 
make([]BlockMeta, 0, len(toLoad)) - for _, b := range toLoad { - blockMetas = append(blockMetas, b.Meta()) - } - if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 { - level.Warn(db.logger).Log("msg", "Overlapping blocks found during reloadBlocks", "detail", overlaps.String()) + // Only check overlapping blocks when overlapping compaction is enabled. + if db.opts.EnableOverlappingCompaction { + blockMetas := make([]BlockMeta, 0, len(toLoad)) + for _, b := range toLoad { + blockMetas = append(blockMetas, b.Meta()) + } + if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 { + level.Warn(db.logger).Log("msg", "Overlapping blocks found during reloadBlocks", "detail", overlaps.String()) + } } // Append blocks to old, deletable blocks, so we can close them. @@ -2149,7 +2153,7 @@ func (db *DB) CleanTombstones() (err error) { cleanUpCompleted = true for _, pb := range db.Blocks() { - uid, safeToDelete, cleanErr := pb.CleanTombstones(db.Dir(), db.compactor) + uids, safeToDelete, cleanErr := pb.CleanTombstones(db.Dir(), db.compactor) if cleanErr != nil { return fmt.Errorf("clean tombstones: %s: %w", pb.Dir(), cleanErr) } @@ -2173,7 +2177,7 @@ func (db *DB) CleanTombstones() (err error) { } // Delete new block if it was created. - if uid != nil && *uid != (ulid.ULID{}) { + for _, uid := range uids { dir := filepath.Join(db.Dir(), uid.String()) if err := os.RemoveAll(dir); err != nil { level.Error(db.logger).Log("msg", "failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err) diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 69c9f60e32..3d2fb2d99d 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -1431,9 +1431,9 @@ func (*mockCompactorFailing) Plan(string) ([]string, error) { return nil, nil } -func (c *mockCompactorFailing) Write(dest string, _ BlockReader, _, _ int64, _ *BlockMeta) (ulid.ULID, error) { +func (c *mockCompactorFailing) Write(dest string, _ BlockReader, _, _ int64, _ *BlockMeta) ([]ulid.ULID, error) { if len(c.blocks) >= c.max { - return ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail") + return []ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail") } block, err := OpenBlock(nil, createBlock(c.t, dest, genSeries(1, 1, 0, 1)), nil) @@ -1452,11 +1452,11 @@ func (c *mockCompactorFailing) Write(dest string, _ BlockReader, _, _ int64, _ * require.Equal(c.t, expectedBlocks, actualBlockDirs) - return block.Meta().ULID, nil + return []ulid.ULID{block.Meta().ULID}, nil } -func (*mockCompactorFailing) Compact(string, []string, []*Block) (ulid.ULID, error) { - return ulid.ULID{}, nil +func (*mockCompactorFailing) Compact(string, []string, []*Block) ([]ulid.ULID, error) { + return []ulid.ULID{}, nil } func (*mockCompactorFailing) CompactOOO(string, *OOOCompactionHead) (result []ulid.ULID, err error) { @@ -6804,9 +6804,9 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) { for _, b := range blocks { blockDirs = append(blockDirs, b.Dir()) } - id, err := db.compactor.Compact(db.Dir(), blockDirs, blocks) + ids, err := db.compactor.Compact(db.Dir(), blockDirs, blocks) require.NoError(t, err) - require.NotEqual(t, ulid.ULID{}, id) + require.Len(t, ids, 1) require.NoError(t, db.reload()) require.Len(t, db.Blocks(), 1) @@ -7068,19 +7068,19 @@ func requireEqualOOOSamples(t *testing.T, expectedSamples int, db *DB) { type mockCompactorFn struct { planFn func() ([]string, error) - compactFn func() (ulid.ULID, error) - writeFn func() 
(ulid.ULID, error) + compactFn func() ([]ulid.ULID, error) + writeFn func() ([]ulid.ULID, error) } func (c *mockCompactorFn) Plan(_ string) ([]string, error) { return c.planFn() } -func (c *mockCompactorFn) Compact(_ string, _ []string, _ []*Block) (ulid.ULID, error) { +func (c *mockCompactorFn) Compact(_ string, _ []string, _ []*Block) ([]ulid.ULID, error) { return c.compactFn() } -func (c *mockCompactorFn) Write(_ string, _ BlockReader, _, _ int64, _ *BlockMeta) (ulid.ULID, error) { +func (c *mockCompactorFn) Write(_ string, _ BlockReader, _, _ int64, _ *BlockMeta) ([]ulid.ULID, error) { return c.writeFn() } @@ -7112,11 +7112,11 @@ func TestAbortBlockCompactions(t *testing.T) { // Our custom Plan() will always return something to compact. return []string{"1", "2", "3"}, nil }, - compactFn: func() (ulid.ULID, error) { - return ulid.ULID{}, nil + compactFn: func() ([]ulid.ULID, error) { + return []ulid.ULID{}, nil }, - writeFn: func() (ulid.ULID, error) { - return ulid.ULID{}, nil + writeFn: func() ([]ulid.ULID, error) { + return []ulid.ULID{}, nil }, } @@ -7135,11 +7135,11 @@ func TestNewCompactorFunc(t *testing.T) { planFn: func() ([]string, error) { return []string{block1.String(), block2.String()}, nil }, - compactFn: func() (ulid.ULID, error) { - return block1, nil + compactFn: func() ([]ulid.ULID, error) { + return []ulid.ULID{block1}, nil }, - writeFn: func() (ulid.ULID, error) { - return block2, nil + writeFn: func() ([]ulid.ULID, error) { + return []ulid.ULID{block2}, nil }, }, nil } @@ -7150,10 +7150,12 @@ func TestNewCompactorFunc(t *testing.T) { plans, err := db.compactor.Plan("") require.NoError(t, err) require.Equal(t, []string{block1.String(), block2.String()}, plans) - ulid, err := db.compactor.Compact("", nil, nil) + ulids, err := db.compactor.Compact("", nil, nil) require.NoError(t, err) - require.Equal(t, block1, ulid) - ulid, err = db.compactor.Write("", nil, 0, 1, nil) + require.Len(t, ulids, 1) + require.Equal(t, block1, ulids[0]) + ulids, err = db.compactor.Write("", nil, 0, 1, nil) require.NoError(t, err) - require.Equal(t, block2, ulid) + require.Len(t, ulids, 1) + require.Equal(t, block2, ulids[0]) } From 03cf6141d4b12655410392ef46cc74adba6ff3f4 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Thu, 13 Jun 2024 18:46:35 +0200 Subject: [PATCH 25/44] Fix Matcher.String() with empty label name When the label name is empty, which can happen now with quoted label name, it should be quoted when printed as a string again. Signed-off-by: Oleg Zaytsev --- model/labels/matcher.go | 2 +- promql/parser/printer_test.go | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/model/labels/matcher.go b/model/labels/matcher.go index 8e220e392d..a09c838e3f 100644 --- a/model/labels/matcher.go +++ b/model/labels/matcher.go @@ -101,7 +101,7 @@ func (m *Matcher) shouldQuoteName() bool { } return true } - return false + return len(m.Name) == 0 } // Matches returns whether the matcher matches the given string value. 
diff --git a/promql/parser/printer_test.go b/promql/parser/printer_test.go
index f224d841d0..d2e301a884 100644
--- a/promql/parser/printer_test.go
+++ b/promql/parser/printer_test.go
@@ -148,6 +148,13 @@ func TestExprString(t *testing.T) {
 in: `{"_0"="1"}`,
 out: `{_0="1"}`,
 },
+ {
+ in: `{""="0"}`,
+ },
+ {
+ in: "{``=\"0\"}",
+ out: `{""="0"}`,
+ },
 }
 
 for _, test := range inputs {

From 4c1e71fa0b3d33754f4d906caa86ba9f73ebafb3 Mon Sep 17 00:00:00 2001
From: Oleg Zaytsev
Date: Fri, 14 Jun 2024 15:02:46 +0200
Subject: [PATCH 26/44] Reduce the flakiness of TestAsyncRuleEvaluation (#14300)

* Reduce the flakiness of TestAsyncRuleEvaluation

This test sleeps for 15 milliseconds per rule group, and then checks that
the entire execution time is smaller than a multiple of that delay.

The ruleCount is 6, so it assumes that the test will reach the assertions
in less than 90ms.

Meanwhile, GitHub's Windows runner:
- ...Huh, oh? What? How much time? milliwhat? Sorry I don't speak that.

TL;DR: this increases the delay to 250 milliseconds. This won't prevent the
test from being flaky, but will reduce the flakiness by several orders of
magnitude and hopefully won't be an issue anymore.

Signed-off-by: Oleg Zaytsev

* Make tests parallel

Signed-off-by: Oleg Zaytsev

---------

Signed-off-by: Oleg Zaytsev
---
 rules/manager_test.go | 42 +++++++++++++++++++++---------------------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/rules/manager_test.go b/rules/manager_test.go
index 11d1282bd3..d31bfc07aa 100644
--- a/rules/manager_test.go
+++ b/rules/manager_test.go
@@ -1910,18 +1910,12 @@ func TestDependencyMapUpdatesOnGroupUpdate(t *testing.T) {
 }
 
 func TestAsyncRuleEvaluation(t *testing.T) {
- storage := teststorage.New(t)
- t.Cleanup(func() { storage.Close() })
-
- var (
- inflightQueries atomic.Int32
- maxInflight atomic.Int32
- )
-
 t.Run("synchronous evaluation with independent rules", func(t *testing.T) {
- // Reset.
- inflightQueries.Store(0)
- maxInflight.Store(0)
+ t.Parallel()
+ storage := teststorage.New(t)
+ t.Cleanup(func() { storage.Close() })
+ inflightQueries := atomic.Int32{}
+ maxInflight := atomic.Int32{}
 
 ctx, cancel := context.WithCancel(context.Background())
 t.Cleanup(cancel)
@@ -1949,9 +1943,11 @@ func TestAsyncRuleEvaluation(t *testing.T) {
 })
 
 t.Run("asynchronous evaluation with independent and dependent rules", func(t *testing.T) {
- // Reset.
- inflightQueries.Store(0)
- maxInflight.Store(0)
+ t.Parallel()
+ storage := teststorage.New(t)
+ t.Cleanup(func() { storage.Close() })
+ inflightQueries := atomic.Int32{}
+ maxInflight := atomic.Int32{}
 
 ctx, cancel := context.WithCancel(context.Background())
 t.Cleanup(cancel)
@@ -1985,9 +1981,11 @@ func TestAsyncRuleEvaluation(t *testing.T) {
 })
 
 t.Run("asynchronous evaluation of all independent rules, insufficient concurrency", func(t *testing.T) {
- // Reset.
- inflightQueries.Store(0)
- maxInflight.Store(0)
+ t.Parallel()
+ storage := teststorage.New(t)
+ t.Cleanup(func() { storage.Close() })
+ inflightQueries := atomic.Int32{}
+ maxInflight := atomic.Int32{}
 
 ctx, cancel := context.WithCancel(context.Background())
 t.Cleanup(cancel)
@@ -2021,9 +2019,11 @@ func TestAsyncRuleEvaluation(t *testing.T) {
 })
 
 t.Run("asynchronous evaluation of all independent rules, sufficient concurrency", func(t *testing.T) {
- // Reset.
- inflightQueries.Store(0) - maxInflight.Store(0) + t.Parallel() + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) + inflightQueries := atomic.Int32{} + maxInflight := atomic.Int32{} ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) @@ -2098,7 +2098,7 @@ func TestBoundedRuleEvalConcurrency(t *testing.T) { require.EqualValues(t, maxInflight.Load(), int32(maxConcurrency)+int32(groupCount)) } -const artificialDelay = 15 * time.Millisecond +const artificialDelay = 250 * time.Millisecond func optsFactory(storage storage.Storage, maxInflight, inflightQueries *atomic.Int32, maxConcurrent int64) *ManagerOptions { var inflightMu sync.Mutex From e7db2e30a41b71e21f2477c0a756884bfba4579f Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Sat, 15 Jun 2024 11:43:26 -0700 Subject: [PATCH 27/44] fix check context cancellation not incrementing count Signed-off-by: Ben Ye --- tsdb/index/index.go | 1 + 1 file changed, 1 insertion(+) diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 8172b81ce3..09fb737bfd 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -1557,6 +1557,7 @@ func (r *Reader) LabelNamesFor(ctx context.Context, postings Postings) ([]string i := 0 for postings.Next() { id := postings.At() + i++ if i%checkContextEveryNIterations == 0 && ctx.Err() != nil { return nil, ctx.Err() From e121d073886be49cdf81ac1fa47e049e7d069c1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Sun, 16 Jun 2024 10:24:09 +0200 Subject: [PATCH 28/44] Prepare release 2.53.0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- CHANGELOG.md | 13 +++---------- VERSION | 2 +- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/package.json | 2 +- web/ui/react-app/package.json | 4 ++-- 7 files changed, 17 insertions(+), 24 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0c6da426a6..82212858c7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,20 +2,13 @@ ## unreleased -## 2.53.0-rc.1 / 2024-06-11 +## 2.53.0 / 2024-06-16 This release changes the default for GOGC, the Go runtime control for the trade-off between excess memory use and CPU usage. We have found that Prometheus operates with minimal additional CPU usage, but greatly reduced memory by adjusting the upstream Go default from 100 to 75. -* [CHANGE] Runtime: Change GOGC threshold from 50 to 75 #14285 -* [BUGFIX] Rules: Fix Group.Equals() to take in account the new queryOffset too. Followup to #14061. #14273 - -## 2.53.0-rc.0 / 2024-06-06 - -This release changes the default for GOGC, the Go runtime control for the trade-off between excess memory use and CPU usage. We have found that Prometheus operates with minimal additional CPU usage, but greatly reduced memory by adjusting the upstream Go default from 100 to 50. - * [CHANGE] Rules: Execute 1 query instead of N (where N is the number of alerts within alert rule) when restoring alerts. #13980 #14048 -* [CHANGE] Runtime: Change GOGC threshold from 100 to 50 #14176 -* [FEATURE] Rules: Add new option `query_offset` for each rule group via rule group configuration file and `rule_query_offset` as part of the global configuration to have more resilience for remote write delays. 
#14061 #14216 +* [CHANGE] Runtime: Change GOGC threshold from 100 to 50 #14176 #14285 +* [FEATURE] Rules: Add new option `query_offset` for each rule group via rule group configuration file and `rule_query_offset` as part of the global configuration to have more resilience for remote write delays. #14061 #14216 #14273 * [ENHANCEMENT] Rules: Add `rule_group_last_restore_duration_seconds` metric to measure the time it takes to restore a rule group. #13974 * [ENHANCEMENT] OTLP: Improve remote write format translation performance by using label set hashes for metric identifiers instead of string based ones. #14006 #13991 * [ENHANCEMENT] TSDB: Optimize querying with regexp matchers. #13620 diff --git a/VERSION b/VERSION index 8d108ef311..261d95596f 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.53.0-rc.1 +2.53.0 diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 5039e8bd26..519c333653 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.53.0-rc.1", + "version": "0.53.0", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.53.0-rc.1", + "@prometheus-io/lezer-promql": "0.53.0", "lru-cache": "^7.18.3" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 6b155d00e0..5a3b0055b7 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.53.0-rc.1", + "version": "0.53.0", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index e1610c1152..c8135d5e20 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.53.0-rc.1", + "version": "0.53.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.53.0-rc.1", + "version": "0.53.0", "workspaces": [ "react-app", "module/*" @@ -30,10 +30,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.53.0-rc.1", + "version": "0.53.0", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.53.0-rc.1", + "@prometheus-io/lezer-promql": "0.53.0", "lru-cache": "^7.18.3" }, "devDependencies": { @@ -69,7 +69,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.53.0-rc.1", + "version": "0.53.0", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.5.1", @@ -19233,7 +19233,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.53.0-rc.1", + "version": "0.53.0", "dependencies": { "@codemirror/autocomplete": "^6.11.1", "@codemirror/commands": "^6.3.2", @@ -19251,7 +19251,7 @@ "@lezer/lr": "^1.3.14", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.53.0-rc.1", + "@prometheus-io/codemirror-promql": "0.53.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.2", diff --git a/web/ui/package.json b/web/ui/package.json index 
06f44fe221..8b924737d6 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -28,5 +28,5 @@ "ts-jest": "^29.1.1", "typescript": "^4.9.5" }, - "version": "0.53.0-rc.1" + "version": "0.53.0" } diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 63b3d60efc..7d9518e8d4 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.53.0-rc.1", + "version": "0.53.0", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.11.1", @@ -19,7 +19,7 @@ "@lezer/lr": "^1.3.14", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.53.0-rc.1", + "@prometheus-io/codemirror-promql": "0.53.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.2", From 0e6fca8e76baf59e4a5f67f398dbf08f654b8013 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Sun, 16 Jun 2024 12:09:42 -0700 Subject: [PATCH 29/44] add unit test Signed-off-by: Ben Ye --- tsdb/index/index.go | 6 ++++-- tsdb/index/index_test.go | 25 +++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 09fb737bfd..3621054598 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -1559,8 +1559,10 @@ func (r *Reader) LabelNamesFor(ctx context.Context, postings Postings) ([]string id := postings.At() i++ - if i%checkContextEveryNIterations == 0 && ctx.Err() != nil { - return nil, ctx.Err() + if i%checkContextEveryNIterations == 0 { + if ctxErr := ctx.Err(); ctxErr != nil { + return nil, ctxErr + } } offset := id diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go index 038caacf8e..d81dd8696c 100644 --- a/tsdb/index/index_test.go +++ b/tsdb/index/index_test.go @@ -634,6 +634,31 @@ func TestReader_PostingsForLabelMatchingHonorsContextCancel(t *testing.T) { require.Equal(t, failAfter, ctx.Count()) } +func TestReader_LabelNamesForHonorsContextCancel(t *testing.T) { + const seriesCount = 1000 + var input indexWriterSeriesSlice + for i := 1; i <= seriesCount; i++ { + input = append(input, &indexWriterSeries{ + labels: labels.FromStrings(labels.MetricName, fmt.Sprintf("%4d", i)), + chunks: []chunks.Meta{ + {Ref: 1, MinTime: 0, MaxTime: 10}, + }, + }) + } + ir, _, _ := createFileReader(context.Background(), t, input) + + name, value := AllPostingsKey() + p, err := ir.Postings(context.Background(), name, value) + require.NoError(t, err) + // We check context cancellation every 128 iterations so 3 will fail after + // iterating 3 * 128 series. + failAfter := uint64(3) + ctx := &testutil.MockContextErrAfter{FailAfter: failAfter} + _, err = ir.LabelNamesFor(ctx, p) + require.Error(t, err) + require.Equal(t, failAfter, ctx.Count()) +} + // createFileReader creates a temporary index file. It writes the provided input to this file. // It returns a Reader for this file, the file's name, and the symbol map. 
func createFileReader(ctx context.Context, tb testing.TB, input indexWriterSeriesSlice) (*Reader, string, map[string]struct{}) { From 987fa5c6a2782ebf2c9902b5708245741d007019 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Mon, 17 Jun 2024 16:43:01 +1000 Subject: [PATCH 30/44] Convert range query test cases to test scripting language Signed-off-by: Charles Korn --- promql/engine_test.go | 161 ------------------ promql/promqltest/testdata/range_queries.test | 73 ++++++++ 2 files changed, 73 insertions(+), 161 deletions(-) create mode 100644 promql/promqltest/testdata/range_queries.test diff --git a/promql/engine_test.go b/promql/engine_test.go index 69d9ea0361..2d13500b1d 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -3061,167 +3061,6 @@ func TestEngineOptsValidation(t *testing.T) { } } -func TestRangeQuery(t *testing.T) { - cases := []struct { - Name string - Load string - Query string - Result parser.Value - Start time.Time - End time.Time - Interval time.Duration - }{ - { - Name: "sum_over_time with all values", - Load: `load 30s - bar 0 1 10 100 1000`, - Query: "sum_over_time(bar[30s])", - Result: promql.Matrix{ - promql.Series{ - Floats: []promql.FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}}, - Metric: labels.EmptyLabels(), - }, - }, - Start: time.Unix(0, 0), - End: time.Unix(120, 0), - Interval: 60 * time.Second, - }, - { - Name: "sum_over_time with trailing values", - Load: `load 30s - bar 0 1 10 100 1000 0 0 0 0`, - Query: "sum_over_time(bar[30s])", - Result: promql.Matrix{ - promql.Series{ - Floats: []promql.FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}}, - Metric: labels.EmptyLabels(), - }, - }, - Start: time.Unix(0, 0), - End: time.Unix(120, 0), - Interval: 60 * time.Second, - }, - { - Name: "sum_over_time with all values long", - Load: `load 30s - bar 0 1 10 100 1000 10000 100000 1000000 10000000`, - Query: "sum_over_time(bar[30s])", - Result: promql.Matrix{ - promql.Series{ - Floats: []promql.FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}, {F: 110000, T: 180000}, {F: 11000000, T: 240000}}, - Metric: labels.EmptyLabels(), - }, - }, - Start: time.Unix(0, 0), - End: time.Unix(240, 0), - Interval: 60 * time.Second, - }, - { - Name: "sum_over_time with all values random", - Load: `load 30s - bar 5 17 42 2 7 905 51`, - Query: "sum_over_time(bar[30s])", - Result: promql.Matrix{ - promql.Series{ - Floats: []promql.FPoint{{F: 5, T: 0}, {F: 59, T: 60000}, {F: 9, T: 120000}, {F: 956, T: 180000}}, - Metric: labels.EmptyLabels(), - }, - }, - Start: time.Unix(0, 0), - End: time.Unix(180, 0), - Interval: 60 * time.Second, - }, - { - Name: "metric query", - Load: `load 30s - metric 1+1x4`, - Query: "metric", - Result: promql.Matrix{ - promql.Series{ - Floats: []promql.FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}}, - Metric: labels.FromStrings("__name__", "metric"), - }, - }, - Start: time.Unix(0, 0), - End: time.Unix(120, 0), - Interval: 1 * time.Minute, - }, - { - Name: "metric query with trailing values", - Load: `load 30s - metric 1+1x8`, - Query: "metric", - Result: promql.Matrix{ - promql.Series{ - Floats: []promql.FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}}, - Metric: labels.FromStrings("__name__", "metric"), - }, - }, - Start: time.Unix(0, 0), - End: time.Unix(120, 0), - Interval: 1 * time.Minute, - }, - { - Name: "short-circuit", - Load: `load 30s - foo{job="1"} 1+1x4 - bar{job="2"} 1+1x4`, - Query: `foo > 2 or bar`, - Result: promql.Matrix{ - promql.Series{ - Floats: []promql.FPoint{{F: 1, T: 
0}, {F: 3, T: 60000}, {F: 5, T: 120000}}, - Metric: labels.FromStrings( - "__name__", "bar", - "job", "2", - ), - }, - promql.Series{ - Floats: []promql.FPoint{{F: 3, T: 60000}, {F: 5, T: 120000}}, - Metric: labels.FromStrings( - "__name__", "foo", - "job", "1", - ), - }, - }, - Start: time.Unix(0, 0), - End: time.Unix(120, 0), - Interval: 1 * time.Minute, - }, - { - Name: "drop-metric-name", - Load: `load 30s - requests{job="1", __address__="bar"} 100`, - Query: `requests * 2`, - Result: promql.Matrix{ - promql.Series{ - Floats: []promql.FPoint{{F: 200, T: 0}, {F: 200, T: 60000}, {F: 200, T: 120000}}, - Metric: labels.FromStrings( - "__address__", "bar", - "job", "1", - ), - }, - }, - Start: time.Unix(0, 0), - End: time.Unix(120, 0), - Interval: 1 * time.Minute, - }, - } - for _, c := range cases { - t.Run(c.Name, func(t *testing.T) { - engine := newTestEngine() - storage := promqltest.LoadedStorage(t, c.Load) - t.Cleanup(func() { storage.Close() }) - - qry, err := engine.NewRangeQuery(context.Background(), storage, nil, c.Query, c.Start, c.End, c.Interval) - require.NoError(t, err) - - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - testutil.RequireEqual(t, c.Result, res.Value) - }) - } -} - func TestInstantQueryWithRangeVectorSelector(t *testing.T) { engine := newTestEngine() diff --git a/promql/promqltest/testdata/range_queries.test b/promql/promqltest/testdata/range_queries.test new file mode 100644 index 0000000000..e695109602 --- /dev/null +++ b/promql/promqltest/testdata/range_queries.test @@ -0,0 +1,73 @@ +# sum_over_time with all values +load 30s + bar 0 1 10 100 1000 + +eval range from 0 to 2m step 1m sum_over_time(bar[30s]) + {} 0 11 1100 + +clear + +# sum_over_time with trailing values +load 30s + bar 0 1 10 100 1000 0 0 0 0 + +eval range from 0 to 2m step 1m sum_over_time(bar[30s]) + {} 0 11 1100 + +clear + +# sum_over_time with all values long +load 30s + bar 0 1 10 100 1000 10000 100000 1000000 10000000 + +eval range from 0 to 4m step 1m sum_over_time(bar[30s]) + {} 0 11 1100 110000 11000000 + +clear + +# sum_over_time with all values random +load 30s + bar 5 17 42 2 7 905 51 + +eval range from 0 to 3m step 1m sum_over_time(bar[30s]) + {} 5 59 9 956 + +clear + +# metric query +load 30s + metric 1+1x4 + +eval range from 0 to 2m step 1m metric + metric 1 3 5 + +clear + +# metric query with trailing values +load 30s + metric 1+1x8 + +eval range from 0 to 2m step 1m metric + metric 1 3 5 + +clear + +# short-circuit +load 30s + foo{job="1"} 1+1x4 + bar{job="2"} 1+1x4 + +eval range from 0 to 2m step 1m foo > 2 or bar + foo{job="1"} _ 3 5 + bar{job="2"} 1 3 5 + +clear + +# Drop metric name +load 30s + requests{job="1", __address__="bar"} 100 + +eval range from 0 to 2m step 1m requests * 2 + {job="1", __address__="bar"} 200 200 200 + +clear From aeec30f082012b3f3445d51ecdc30dfb15790a45 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Mon, 17 Jun 2024 16:56:56 +1000 Subject: [PATCH 31/44] Convert `TestTimestampFunction_StepsMoreOftenThanSamples` Signed-off-by: Charles Korn --- promql/engine_test.go | 41 ----------------------- promql/promqltest/testdata/functions.test | 8 +++++ 2 files changed, 8 insertions(+), 41 deletions(-) diff --git a/promql/engine_test.go b/promql/engine_test.go index 2d13500b1d..4e321a6c33 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -2015,47 +2015,6 @@ func TestSubquerySelector(t *testing.T) { } } -func TestTimestampFunction_StepsMoreOftenThanSamples(t *testing.T) { - engine := newTestEngine() - storage := 
promqltest.LoadedStorage(t, ` -load 1m - metric 0+1x1000 -`) - t.Cleanup(func() { storage.Close() }) - - query := "timestamp(metric)" - start := time.Unix(0, 0) - end := time.Unix(61, 0) - interval := time.Second - - // We expect the value to be 0 for t=0s to t=59s (inclusive), then 60 for t=60s and t=61s. - expectedPoints := []promql.FPoint{} - - for t := 0; t <= 59; t++ { - expectedPoints = append(expectedPoints, promql.FPoint{F: 0, T: int64(t * 1000)}) - } - - expectedPoints = append( - expectedPoints, - promql.FPoint{F: 60, T: 60_000}, - promql.FPoint{F: 60, T: 61_000}, - ) - - expectedResult := promql.Matrix{ - promql.Series{ - Floats: expectedPoints, - Metric: labels.EmptyLabels(), - }, - } - - qry, err := engine.NewRangeQuery(context.Background(), storage, nil, query, start, end, interval) - require.NoError(t, err) - - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - testutil.RequireEqual(t, expectedResult, res.Value) -} - type FakeQueryLogger struct { closed bool logs []interface{} diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index 2c198374ac..7e741e9956 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -1213,3 +1213,11 @@ eval instant at 5m log10(exp_root_log - 20) {l="y"} -Inf clear + +# Test that timestamp() handles the scenario where there are more steps than samples. +load 1m + metric 0+1x1000 + +# We expect the value to be 0 for t=0s to t=59s (inclusive), then 60 for t=60s and t=61s. +eval range from 0 to 61s step 1s timestamp(metric) + {} 0x59 60 60 From 0fbf4a2529070ce6f8880b23b4834eeab6159fa6 Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Mon, 17 Jun 2024 10:40:45 +0200 Subject: [PATCH 32/44] Export remote.ToLabelMatchers() Signed-off-by: Marco Pracucci --- storage/remote/codec.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/storage/remote/codec.go b/storage/remote/codec.go index 1228b23f5c..c3b815a4d3 100644 --- a/storage/remote/codec.go +++ b/storage/remote/codec.go @@ -95,7 +95,7 @@ func EncodeReadResponse(resp *prompb.ReadResponse, w http.ResponseWriter) error // ToQuery builds a Query proto. func ToQuery(from, to int64, matchers []*labels.Matcher, hints *storage.SelectHints) (*prompb.Query, error) { - ms, err := toLabelMatchers(matchers) + ms, err := ToLabelMatchers(matchers) if err != nil { return nil, err } @@ -566,7 +566,8 @@ func validateLabelsAndMetricName(ls []prompb.Label) error { return nil } -func toLabelMatchers(matchers []*labels.Matcher) ([]*prompb.LabelMatcher, error) { +// ToLabelMatchers converts Prometheus label matchers to protobuf label matchers. +func ToLabelMatchers(matchers []*labels.Matcher) ([]*prompb.LabelMatcher, error) { pbMatchers := make([]*prompb.LabelMatcher, 0, len(matchers)) for _, m := range matchers { var mType prompb.LabelMatcher_Type @@ -591,7 +592,7 @@ func toLabelMatchers(matchers []*labels.Matcher) ([]*prompb.LabelMatcher, error) return pbMatchers, nil } -// FromLabelMatchers parses protobuf label matchers to Prometheus label matchers. +// FromLabelMatchers converts protobuf label matchers to Prometheus label matchers. 
func FromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, error) { result := make([]*labels.Matcher, 0, len(matchers)) for _, matcher := range matchers { From 4f78cc809c3f3cfd5a4d2bfce9b9bedacb7eb27a Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Tue, 18 Jun 2024 11:57:37 +0200 Subject: [PATCH 33/44] Refactor `toNormalisedLower`: shorter and slightly faster. (#14299) Refactor toNormalisedLower: shorter and slightly faster Signed-off-by: Oleg Zaytsev --- model/labels/labels_common.go | 5 +++++ model/labels/labels_dedupelabels.go | 5 ----- model/labels/labels_stringlabels.go | 5 ----- model/labels/regexp.go | 34 ++++++++--------------------- model/labels/regexp_test.go | 4 ++++ 5 files changed, 18 insertions(+), 35 deletions(-) diff --git a/model/labels/labels_common.go b/model/labels/labels_common.go index f46321c97e..4bc94f84fe 100644 --- a/model/labels/labels_common.go +++ b/model/labels/labels_common.go @@ -18,6 +18,7 @@ import ( "encoding/json" "slices" "strconv" + "unsafe" "github.com/prometheus/common/model" ) @@ -215,3 +216,7 @@ func contains(s []Label, n string) bool { } return false } + +func yoloString(b []byte) string { + return *((*string)(unsafe.Pointer(&b))) +} diff --git a/model/labels/labels_dedupelabels.go b/model/labels/labels_dedupelabels.go index dfc74aa3a3..972f5dc164 100644 --- a/model/labels/labels_dedupelabels.go +++ b/model/labels/labels_dedupelabels.go @@ -20,7 +20,6 @@ import ( "slices" "strings" "sync" - "unsafe" "github.com/cespare/xxhash/v2" ) @@ -426,10 +425,6 @@ func EmptyLabels() Labels { return Labels{} } -func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) -} - // New returns a sorted Labels from the given labels. // The caller has to guarantee that all label names are unique. // Note this function is not efficient; should not be used in performance-critical places. diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go index 9ef764daec..bccceb61fe 100644 --- a/model/labels/labels_stringlabels.go +++ b/model/labels/labels_stringlabels.go @@ -299,11 +299,6 @@ func Equal(ls, o Labels) bool { func EmptyLabels() Labels { return Labels{} } - -func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) -} - func yoloBytes(s string) (b []byte) { *(*string)(unsafe.Pointer(&b)) = s (*reflect.SliceHeader)(unsafe.Pointer(&b)).Cap = len(s) diff --git a/model/labels/regexp.go b/model/labels/regexp.go index 1f3f15eb07..1e9db882bf 100644 --- a/model/labels/regexp.go +++ b/model/labels/regexp.go @@ -798,39 +798,23 @@ func (m *equalMultiStringMapMatcher) Matches(s string) bool { // toNormalisedLower normalise the input string using "Unicode Normalization Form D" and then convert // it to lower case. func toNormalisedLower(s string) string { - // Check if the string is all ASCII chars and convert any upper case character to lower case character. - isASCII := true - var ( - b strings.Builder - pos int - ) - b.Grow(len(s)) + var buf []byte for i := 0; i < len(s); i++ { c := s[i] - if isASCII && c >= utf8.RuneSelf { - isASCII = false - break + if c >= utf8.RuneSelf { + return strings.Map(unicode.ToLower, norm.NFKD.String(s)) } if 'A' <= c && c <= 'Z' { - c += 'a' - 'A' - if pos < i { - b.WriteString(s[pos:i]) + if buf == nil { + buf = []byte(s) } - b.WriteByte(c) - pos = i + 1 + buf[i] = c + 'a' - 'A' } } - if pos < len(s) { - b.WriteString(s[pos:]) + if buf == nil { + return s } - - // Optimize for ASCII-only strings. In this case we don't have to do any normalization. 
- if isASCII { - return b.String() - } - - // Normalise and convert to lower. - return strings.Map(unicode.ToLower, norm.NFKD.String(b.String())) + return yoloString(buf) } // anyStringWithoutNewlineMatcher is a stringMatcher which matches any string diff --git a/model/labels/regexp_test.go b/model/labels/regexp_test.go index c86a5cae41..008eae702c 100644 --- a/model/labels/regexp_test.go +++ b/model/labels/regexp_test.go @@ -1209,6 +1209,10 @@ func visitStringMatcher(matcher StringMatcher, callback func(matcher StringMatch func TestToNormalisedLower(t *testing.T) { testCases := map[string]string{ "foo": "foo", + "FOO": "foo", + "Foo": "foo", + "foO": "foo", + "fOo": "foo", "AAAAAAAAAAAAAAAAAAAAAAAA": "aaaaaaaaaaaaaaaaaaaaaaaa", "cccccccccccccccccccccccC": "cccccccccccccccccccccccc", "ſſſſſſſſſſſſſſſſſſſſſſſſS": "sssssssssssssssssssssssss", From fd1a89b7c87ec6658708fc631d6af591223d2077 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Tue, 18 Jun 2024 12:28:56 +0200 Subject: [PATCH 34/44] Pass affected labels to `MemPostings.Delete()` (#14307) * Pass affected labels to MemPostings.Delete As suggested by @bboreham, we can track the labels of the deleted series and avoid iterating through all the label/value combinations. This looks much faster on the MemPostings.Delete call. We don't have a benchmark on stripeSeries.gc() where we'll pay the price of iterating the labels of each one of the deleted series. Signed-off-by: Oleg Zaytsev --- tsdb/head.go | 10 ++-- tsdb/head_test.go | 74 ++++++++++++++++++++++++++ tsdb/index/postings.go | 101 ++++++++---------------------------- tsdb/index/postings_test.go | 67 ++++++++++++------------ 4 files changed, 138 insertions(+), 114 deletions(-) diff --git a/tsdb/head.go b/tsdb/head.go index d5f7144fdb..5972a9c5d6 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -1552,7 +1552,7 @@ func (h *Head) gc() (actualInOrderMint, minOOOTime int64, minMmapFile int) { // Drop old chunks and remember series IDs and hashes if they can be // deleted entirely. - deleted, chunksRemoved, actualInOrderMint, minOOOTime, minMmapFile := h.series.gc(mint, minOOOMmapRef) + deleted, affected, chunksRemoved, actualInOrderMint, minOOOTime, minMmapFile := h.series.gc(mint, minOOOMmapRef) seriesRemoved := len(deleted) h.metrics.seriesRemoved.Add(float64(seriesRemoved)) @@ -1561,7 +1561,7 @@ func (h *Head) gc() (actualInOrderMint, minOOOTime int64, minMmapFile int) { h.numSeries.Sub(uint64(seriesRemoved)) // Remove deleted series IDs from the postings lists. - h.postings.Delete(deleted) + h.postings.Delete(deleted, affected) // Remove tombstones referring to the deleted series. h.tombstones.DeleteTombstones(deleted) @@ -1869,9 +1869,10 @@ func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *st // but the returned map goes into postings.Delete() which expects a map[storage.SeriesRef]struct // and there's no easy way to cast maps. // minMmapFile is the min mmap file number seen in the series (in-order and out-of-order) after gc'ing the series. 
-func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (_ map[storage.SeriesRef]struct{}, _ int, _, _ int64, minMmapFile int) { +func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (_ map[storage.SeriesRef]struct{}, _ map[labels.Label]struct{}, _ int, _, _ int64, minMmapFile int) { var ( deleted = map[storage.SeriesRef]struct{}{} + affected = map[labels.Label]struct{}{} rmChunks = 0 actualMint int64 = math.MaxInt64 minOOOTime int64 = math.MaxInt64 @@ -1927,6 +1928,7 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) ( } deleted[storage.SeriesRef(series.ref)] = struct{}{} + series.lset.Range(func(l labels.Label) { affected[l] = struct{}{} }) s.hashes[hashShard].del(hash, series.ref) delete(s.series[refShard], series.ref) deletedForCallback[series.ref] = series.lset @@ -1938,7 +1940,7 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) ( actualMint = mint } - return deleted, rmChunks, actualMint, minOOOTime, minMmapFile + return deleted, affected, rmChunks, actualMint, minOOOTime, minMmapFile } // The iterForDeletion function iterates through all series, invoking the checkDeletedFunc for each. diff --git a/tsdb/head_test.go b/tsdb/head_test.go index bb437ab598..93f046e5b3 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -814,6 +814,80 @@ func TestHead_UnknownWALRecord(t *testing.T) { require.NoError(t, head.Close()) } +// BenchmarkHead_Truncate is quite heavy, so consider running it with +// -benchtime=10x or similar to get more stable and comparable results. +func BenchmarkHead_Truncate(b *testing.B) { + const total = 1e6 + + prepare := func(b *testing.B, churn int) *Head { + h, _ := newTestHead(b, 1000, wlog.CompressionNone, false) + b.Cleanup(func() { + require.NoError(b, h.Close()) + }) + + h.initTime(0) + + internedItoa := map[int]string{} + var mtx sync.RWMutex + itoa := func(i int) string { + mtx.RLock() + s, ok := internedItoa[i] + mtx.RUnlock() + if ok { + return s + } + mtx.Lock() + s = strconv.Itoa(i) + internedItoa[i] = s + mtx.Unlock() + return s + } + + allSeries := [total]labels.Labels{} + nameValues := make([]string, 0, 100) + for i := 0; i < total; i++ { + nameValues = nameValues[:0] + + // A thousand labels like lbl_x_of_1000, each with total/1000 values + thousand := "lbl_" + itoa(i%1000) + "_of_1000" + nameValues = append(nameValues, thousand, itoa(i/1000)) + // A hundred labels like lbl_x_of_100, each with total/100 values. + hundred := "lbl_" + itoa(i%100) + "_of_100" + nameValues = append(nameValues, hundred, itoa(i/100)) + + if i%13 == 0 { + ten := "lbl_" + itoa(i%10) + "_of_10" + nameValues = append(nameValues, ten, itoa(i%10)) + } + + allSeries[i] = labels.FromStrings(append(nameValues, "first", "a", "second", "a", "third", "a")...) + s, _, _ := h.getOrCreate(allSeries[i].Hash(), allSeries[i]) + s.mmappedChunks = []*mmappedChunk{ + {minTime: 1000 * int64(i/churn), maxTime: 999 + 1000*int64(i/churn)}, + } + } + + return h + } + + for _, churn := range []int{10, 100, 1000} { + b.Run(fmt.Sprintf("churn=%d", churn), func(b *testing.B) { + if b.N > total/churn { + // Just to make sure that benchmark still makes sense. + panic("benchmark not prepared") + } + h := prepare(b, churn) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + require.NoError(b, h.Truncate(1000*int64(i))) + // Make sure the benchmark is meaningful and it's actually truncating the expected amount of series. 
+ require.Equal(b, total-churn*i, int(h.NumSeries())) + } + }) + } +} + func TestHead_Truncate(t *testing.T) { h, _ := newTestHead(t, 1000, wlog.CompressionNone, false) defer func() { diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 6b654f6b5b..d9b5b69de0 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -288,89 +288,34 @@ func (p *MemPostings) EnsureOrder(numberOfConcurrentProcesses int) { } // Delete removes all ids in the given map from the postings lists. -func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}) { - // We will take an optimistic read lock for the entire method, - // and only lock for writing when we actually find something to delete. - // - // Each SeriesRef can appear in several Postings. - // To change each one, we need to know the label name and value that it is indexed under. - // We iterate over all label names, then for each name all values, - // and look for individual series to be deleted. - p.mtx.RLock() - defer p.mtx.RUnlock() +// affectedLabels contains all the labels that are affected by the deletion, there's no need to check other labels. +func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected map[labels.Label]struct{}) { + p.mtx.Lock() + defer p.mtx.Unlock() - // Collect all keys relevant for deletion once. New keys added afterwards - // can by definition not be affected by any of the given deletes. - keys := make([]string, 0, len(p.m)) - maxVals := 0 - for n := range p.m { - keys = append(keys, n) - if len(p.m[n]) > maxVals { - maxVals = len(p.m[n]) + process := func(l labels.Label) { + orig := p.m[l.Name][l.Value] + repl := make([]storage.SeriesRef, 0, len(orig)) + for _, id := range orig { + if _, ok := deleted[id]; !ok { + repl = append(repl, id) + } + } + if len(repl) > 0 { + p.m[l.Name][l.Value] = repl + } else { + delete(p.m[l.Name], l.Value) + // Delete the key if we removed all values. + if len(p.m[l.Name]) == 0 { + delete(p.m, l.Name) + } } } - vals := make([]string, 0, maxVals) - for _, n := range keys { - // Copy the values and iterate the copy: if we unlock in the loop below, - // another goroutine might modify the map while we are part-way through it. - vals = vals[:0] - for v := range p.m[n] { - vals = append(vals, v) - } - - // For each posting we first analyse whether the postings list is affected by the deletes. - // If no, we remove the label value from the vals list. - // This way we only need to Lock once later. - for i := 0; i < len(vals); { - found := false - refs := p.m[n][vals[i]] - for _, id := range refs { - if _, ok := deleted[id]; ok { - i++ - found = true - break - } - } - - if !found { - // Didn't match, bring the last value to this position, make the slice shorter and check again. - // The order of the slice doesn't matter as it comes from a map iteration. - vals[i], vals = vals[len(vals)-1], vals[:len(vals)-1] - } - } - - // If no label values have deleted ids, just continue. - if len(vals) == 0 { - continue - } - - // The only vals left here are the ones that contain deleted ids. - // Now we take the write lock and remove the ids. - p.mtx.RUnlock() - p.mtx.Lock() - for _, l := range vals { - repl := make([]storage.SeriesRef, 0, len(p.m[n][l])) - - for _, id := range p.m[n][l] { - if _, ok := deleted[id]; !ok { - repl = append(repl, id) - } - } - if len(repl) > 0 { - p.m[n][l] = repl - } else { - delete(p.m[n], l) - } - } - - // Delete the key if we removed all values. 
- if len(p.m[n]) == 0 { - delete(p.m, n) - } - p.mtx.Unlock() - p.mtx.RLock() + for l := range affected { + process(l) } + process(allPostingsKey) } // Iter calls f for each postings list. It aborts if f returns an error and returns it. diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index 4f34cc47ea..96c9ed124b 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -979,9 +979,13 @@ func TestMemPostings_Delete(t *testing.T) { p.Add(3, labels.FromStrings("lbl2", "a")) before := p.Get(allPostingsKey.Name, allPostingsKey.Value) - p.Delete(map[storage.SeriesRef]struct{}{ + deletedRefs := map[storage.SeriesRef]struct{}{ 2: {}, - }) + } + affectedLabels := map[labels.Label]struct{}{ + {Name: "lbl1", Value: "b"}: {}, + } + p.Delete(deletedRefs, affectedLabels) after := p.Get(allPostingsKey.Name, allPostingsKey.Value) // Make sure postings gotten before the delete have the old data when @@ -1022,33 +1026,23 @@ func BenchmarkMemPostings_Delete(b *testing.B) { } const total = 1e6 - prepare := func() *MemPostings { - var ref storage.SeriesRef - next := func() storage.SeriesRef { - ref++ - return ref + allSeries := [total]labels.Labels{} + nameValues := make([]string, 0, 100) + for i := 0; i < total; i++ { + nameValues = nameValues[:0] + + // A thousand labels like lbl_x_of_1000, each with total/1000 values + thousand := "lbl_" + itoa(i%1000) + "_of_1000" + nameValues = append(nameValues, thousand, itoa(i/1000)) + // A hundred labels like lbl_x_of_100, each with total/100 values. + hundred := "lbl_" + itoa(i%100) + "_of_100" + nameValues = append(nameValues, hundred, itoa(i/100)) + + if i < 100 { + ten := "lbl_" + itoa(i%10) + "_of_10" + nameValues = append(nameValues, ten, itoa(i%10)) } - - p := NewMemPostings() - nameValues := make([]string, 0, 100) - for i := 0; i < total; i++ { - nameValues = nameValues[:0] - - // A thousand labels like lbl_x_of_1000, each with total/1000 values - thousand := "lbl_" + itoa(i%1000) + "_of_1000" - nameValues = append(nameValues, thousand, itoa(i/1000)) - // A hundred labels like lbl_x_of_100, each with total/100 values. - hundred := "lbl_" + itoa(i%100) + "_of_100" - nameValues = append(nameValues, hundred, itoa(i/100)) - - if i < 100 { - ten := "lbl_" + itoa(i%10) + "_of_10" - nameValues = append(nameValues, ten, itoa(i%10)) - } - - p.Add(next(), labels.FromStrings(append(nameValues, "first", "a", "second", "a", "third", "a")...)) - } - return p + allSeries[i] = labels.FromStrings(append(nameValues, "first", "a", "second", "a", "third", "a")...) 
} for _, refs := range []int{1, 100, 10_000} { @@ -1060,7 +1054,11 @@ func BenchmarkMemPostings_Delete(b *testing.B) { panic("benchmark not prepared") } - p := prepare() + p := NewMemPostings() + for i := range allSeries { + p.Add(storage.SeriesRef(i), allSeries[i]) + } + stop := make(chan struct{}) wg := sync.WaitGroup{} for i := 0; i < reads; i++ { @@ -1086,11 +1084,16 @@ func BenchmarkMemPostings_Delete(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - deleted := map[storage.SeriesRef]struct{}{} + deleted := make(map[storage.SeriesRef]struct{}, refs) + affected := make(map[labels.Label]struct{}, refs) for i := 0; i < refs; i++ { - deleted[storage.SeriesRef(n*refs+i)] = struct{}{} + ref := storage.SeriesRef(n*refs + i) + deleted[ref] = struct{}{} + allSeries[ref].Range(func(l labels.Label) { + affected[l] = struct{}{} + }) } - p.Delete(deleted) + p.Delete(deleted, affected) } }) } From 29d3e482676cbd1dde29d701d56fbdc98ba8ee61 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Tue, 18 Jun 2024 13:45:53 +0200 Subject: [PATCH 35/44] Update CHANGELOG.md Co-authored-by: Julien <291750+roidelapluie@users.noreply.github.com> Signed-off-by: George Krajcsovits --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 82212858c7..7e47999934 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ This release changes the default for GOGC, the Go runtime control for the trade-off between excess memory use and CPU usage. We have found that Prometheus operates with minimal additional CPU usage, but greatly reduced memory by adjusting the upstream Go default from 100 to 75. * [CHANGE] Rules: Execute 1 query instead of N (where N is the number of alerts within alert rule) when restoring alerts. #13980 #14048 -* [CHANGE] Runtime: Change GOGC threshold from 100 to 50 #14176 #14285 +* [CHANGE] Runtime: Change GOGC threshold from 100 to 75 #14176 #14285 * [FEATURE] Rules: Add new option `query_offset` for each rule group via rule group configuration file and `rule_query_offset` as part of the global configuration to have more resilience for remote write delays. #14061 #14216 #14273 * [ENHANCEMENT] Rules: Add `rule_group_last_restore_duration_seconds` metric to measure the time it takes to restore a rule group. #13974 * [ENHANCEMENT] OTLP: Improve remote write format translation performance by using label set hashes for metric identifiers instead of string based ones. #14006 #13991 From 1c3f322f7832d6ebbba9dc3a13b398e03e014bcf Mon Sep 17 00:00:00 2001 From: Rens Groothuijsen Date: Tue, 18 Jun 2024 13:51:47 +0200 Subject: [PATCH 36/44] docs: mention implicitly watched directories in documentation (#14019) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * docs: mention implicitly watched directories in documentation Signed-off-by: Rens Groothuijsen * Add mention of atomic file renaming Co-authored-by: Ayoub Mrini Signed-off-by: Rens Groothuijsen --------- Signed-off-by: Rens Groothuijsen Co-authored-by: Ayoub Mrini Co-authored-by: Björn Rabenstein --- docs/configuration/configuration.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index b83219700a..5df7dae3c0 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -1608,7 +1608,16 @@ and serves as an interface to plug in custom service discovery mechanisms. 
It reads a set of files containing a list of zero or more ``s. Changes to all defined files are detected via disk watches -and applied immediately. Files may be provided in YAML or JSON format. Only +and applied immediately. + +While those individual files are watched for changes, +the parent directory is also watched implicitly. This is to handle [atomic +renaming](https://github.com/fsnotify/fsnotify/blob/c1467c02fba575afdb5f4201072ab8403bbf00f4/README.md?plain=1#L128) efficiently and to detect new files that match the configured globs. +This may cause issues if the parent directory contains a large number of other files, +as each of these files will be watched too, even though the events related +to them are not relevant. + +Files may be provided in YAML or JSON format. Only changes resulting in well-formed target groups are applied. Files must contain a list of static configs, using these formats: From be975bf8d729ff50f1dbfc3e415aa4cfd3f6567e Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Tue, 18 Jun 2024 20:41:26 +0200 Subject: [PATCH 37/44] golangci-lint: Enable loggercheck linter Signed-off-by: Arve Knudsen --- .golangci.yml | 1 + util/treecache/treecache.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.golangci.yml b/.golangci.yml index f81b29ed2d..026d68a313 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -29,6 +29,7 @@ linters: - unused - usestdlibvars - whitespace + - loggercheck issues: max-same-issues: 0 diff --git a/util/treecache/treecache.go b/util/treecache/treecache.go index 06356bb0bb..bbbaaf3d6e 100644 --- a/util/treecache/treecache.go +++ b/util/treecache/treecache.go @@ -200,7 +200,7 @@ func (tc *ZookeeperTreeCache) loop(path string) { failure() } else { tc.resyncState(tc.prefix, tc.head, previousState) - level.Info(tc.logger).Log("Zookeeper resync successful") + level.Info(tc.logger).Log("msg", "Zookeeper resync successful") failureMode = false } case <-tc.stop: From 545d31f184cd405eeacf4b6f035d9b6ddbdee6bd Mon Sep 17 00:00:00 2001 From: anarcat Date: Wed, 19 Jun 2024 01:46:13 -0400 Subject: [PATCH 38/44] docs: clarify backup requirements for storage (#14297) * clarify backup requirements for storage After reading this (again) recently, I was under the impression that our backup strategy ("just throw Bacula at it") was just not good enough and that our backups were inconsistent. I filed [an issue internally][41627] about this because of that concern. But reading a conversation with @SuperQ on IRC, I came under the impression that only the WAL files would be lost. This is an attempt at documenting this more clearly. [41627]: https://gitlab.torproject.org/tpo/tpa/team/-/issues/41627 --------- Signed-off-by: anarcat Co-authored-by: Ben Kochie --- docs/storage.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/docs/storage.md b/docs/storage.md index b66f2062af..947960fe12 100644 --- a/docs/storage.md +++ b/docs/storage.md @@ -61,8 +61,11 @@ A Prometheus server's data directory looks something like this: Note that a limitation of local storage is that it is not clustered or replicated. Thus, it is not arbitrarily scalable or durable in the face of drive or node outages and should be managed like any other single node -database. The use of RAID is suggested for storage availability, and -[snapshots](querying/api.md#snapshot) are recommended for backups. With proper +database. + +[Snapshots](querying/api.md#snapshot) are recommended for backups. 
Backups +made without snapshots run the risk of losing data that was recorded since +the last WAL sync, which typically happens every two hours. With proper architecture, it is possible to retain years of data in local storage. Alternatively, external storage may be used via the From 94d28cd6cf96d0f56f8cd5f3f0b7b9777eb26640 Mon Sep 17 00:00:00 2001 From: machine424 Date: Fri, 31 May 2024 16:40:09 +0200 Subject: [PATCH 39/44] chore(notifier): add a reproducer for https://github.com/prometheus/prometheus/issues/13676 to show "targets groups update" starvation when the notifications queue is full and an Alertmanager is down. The existing `TestHangingNotifier` that was added in https://github.com/prometheus/prometheus/pull/10948 doesn't really reflect the reality as the SD changes are manually fed into `syncCh` in a continuous way, whereas in reality, updates are only resent every `updatert`. The test added here sets up an SD manager and links it to the notifier. The SD changes will be triggered by that manager as it's done in reality. Signed-off-by: machine424 Co-authored-by: Ethan Hunter --- discovery/manager.go | 10 +++ notifier/notifier_test.go | 149 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 159 insertions(+) diff --git a/discovery/manager.go b/discovery/manager.go index f14071af30..897d7d151c 100644 --- a/discovery/manager.go +++ b/discovery/manager.go @@ -120,6 +120,16 @@ func Name(n string) func(*Manager) { } } +// Updatert sets the updatert of the manager. +// Used to speed up tests. +func Updatert(u time.Duration) func(*Manager) { + return func(m *Manager) { + m.mtx.Lock() + defer m.mtx.Unlock() + m.updatert = u + } +} + // HTTPClientOptions sets the list of HTTP client options to expose to // Discoverers. It is up to Discoverers to choose to use the options provided. func HTTPClientOptions(opts ...config.HTTPClientOption) func(*Manager) { diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go index d2e72ca33b..5c82decbe0 100644 --- a/notifier/notifier_test.go +++ b/notifier/notifier_test.go @@ -26,13 +26,17 @@ import ( "testing" "time" + "github.com/go-kit/log" "github.com/prometheus/alertmanager/api/v2/models" + "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.uber.org/atomic" "gopkg.in/yaml.v2" + "github.com/prometheus/prometheus/discovery" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/labels" @@ -811,3 +815,148 @@ func TestHangingNotifier(t *testing.T) { }) } } + +// TODO: renameit and even replace TestHangingNotifier with it. +// TestHangingNotifierXXX ensures that the notifier takes into account SD changes even when there are +// queued alerts. This test reproduces the issue described in https://github.com/prometheus/prometheus/issues/13676. +func TestHangingNotifierXXX(t *testing.T) { + const ( + batches = 100 + alertsCount = maxBatchSize * batches + ) + + var ( + sendTimeout = 10 * time.Millisecond + sdUpdatert = sendTimeout / 2 + + done = make(chan struct{}) + ) + + defer func() { + close(done) + }() + + // Set up a faulty Alertmanager. 
+ var faultyCalled atomic.Bool + faultyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + faultyCalled.Store(true) + select { + case <-done: + case <-time.After(time.Hour): + } + })) + faultyURL, err := url.Parse(faultyServer.URL) + require.NoError(t, err) + + // Set up a functional Alertmanager. + var functionalCalled atomic.Bool + functionalServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + functionalCalled.Store(true) + })) + functionalURL, err := url.Parse(functionalServer.URL) + require.NoError(t, err) + + // Initialize the discovery manager + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + reg := prometheus.NewRegistry() + sdMetrics, err := discovery.RegisterSDMetrics(reg, discovery.NewRefreshMetrics(reg)) + require.NoError(t, err) + sdManager := discovery.NewManager( + ctx, + log.NewNopLogger(), + reg, + sdMetrics, + discovery.Name("sd-manager"), + discovery.Updatert(sdUpdatert), + ) + go sdManager.Run() + + // Set up the notifier with both faulty and functional Alertmanagers. + notifier := NewManager( + &Options{ + QueueCapacity: alertsCount, + }, + nil, + ) + notifier.alertmanagers = make(map[string]*alertmanagerSet) + amCfg := config.DefaultAlertmanagerConfig + amCfg.Timeout = model.Duration(sendTimeout) + notifier.alertmanagers["config-0"] = &alertmanagerSet{ + ams: []alertmanager{ + alertmanagerMock{ + urlf: func() string { return faultyURL.String() }, + }, + alertmanagerMock{ + urlf: func() string { return functionalURL.String() }, + }, + }, + cfg: &amCfg, + metrics: notifier.metrics, + } + go notifier.Run(sdManager.SyncCh()) + defer notifier.Stop() + + require.Len(t, notifier.Alertmanagers(), 2) + + // Enqueue the alerts. + var alerts []*Alert + for i := range make([]struct{}, alertsCount) { + alerts = append(alerts, &Alert{ + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), + }) + } + notifier.Send(alerts...) + + // Wait for the Alertmanagers to start receiving alerts. + // 10*sdUpdatert is used as an arbitrary timeout here. + timeout := time.After(10 * sdUpdatert) +loop1: + for { + select { + case <-timeout: + t.Fatalf("Timeout waiting for the alertmanagers to be reached for the first time.") + default: + if faultyCalled.Load() && functionalCalled.Load() { + break loop1 + } + } + } + + // Request to remove the faulty Alertmanager. + c := map[string]discovery.Configs{ + "config-0": { + discovery.StaticConfig{ + &targetgroup.Group{ + Targets: []model.LabelSet{ + { + model.AddressLabel: model.LabelValue(functionalURL.Host), + }, + }, + }, + }, + }, + } + require.NoError(t, sdManager.ApplyConfig(c)) + + // The notifier should not wait until the alerts queue is empty to apply the discovery changes + // A faulty Alertmanager could cause each alert sending cycle to take up to AlertmanagerConfig.Timeout + // The queue may never be emptied, as the arrival rate could be larger than the departure rate + // It could even overflow and alerts could be dropped. + timeout = time.After(batches * sendTimeout) +loop2: + for { + select { + case <-timeout: + t.Fatalf("Timeout, the faulty alertmanager not removed on time.") + default: + // The faulty alertmanager was dropped. + if len(notifier.Alertmanagers()) == 1 { + // Prevent from TOCTOU. 
+ require.Positive(t, notifier.queueLen()) + break loop2 + } + require.Positive(t, notifier.queueLen(), "The faulty alertmanager wasn't dropped before the alerts queue was emptied.") + } + } +} From 690de487e2fb0be3e3a41121bbe2b9ac3e7a844c Mon Sep 17 00:00:00 2001 From: machine424 Date: Mon, 3 Jun 2024 18:09:51 +0200 Subject: [PATCH 40/44] chore(notifier): Split 'Run()' into two goroutines: one to receive target updates and trigger reloads and the other one to send notifications. This is done to prevent the latter operation from blocking/starving the former, as previously, the `tsets` channel was consumed by the same goroutine that consumes and feeds the buffered `n.more` channel, the `tsets` channel was less likely to be ready as it's unbuffered and only fed every `SDManager.updatert` seconds. See https://github.com/prometheus/prometheus/issues/13676 and https://github.com/prometheus/prometheus/issues/8768 The synchronization with the sendLoop goroutine is managed through the n.mtx mutex. This uses a similar approach than scrape manager's https://github.com/prometheus/prometheus/blob/efbd6e41c59ec8d6b7a0791c1fb337fdac53b4f2/scrape/manager.go#L115-L117 The old TestHangingNotifier was replaced by the new one to more closely reflect reality. Signed-off-by: machine424 --- notifier/notifier.go | 35 ++++++----- notifier/notifier_test.go | 123 ++------------------------------------ 2 files changed, 25 insertions(+), 133 deletions(-) diff --git a/notifier/notifier.go b/notifier/notifier.go index 4cf376aa05..a375a0749c 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -298,25 +298,14 @@ func (n *Manager) nextBatch() []*Alert { return alerts } -// Run dispatches notifications continuously. -func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) { +// sendLoop continuously consumes the notifications queue and sends alerts to +// the configured Alertmanagers. +func (n *Manager) sendLoop() { for { - // The select is split in two parts, such as we will first try to read - // new alertmanager targets if they are available, before sending new - // alerts. select { case <-n.ctx.Done(): return - case ts := <-tsets: - n.reload(ts) - default: - select { - case <-n.ctx.Done(): - return - case ts := <-tsets: - n.reload(ts) - case <-n.more: - } + case <-n.more: } alerts := n.nextBatch() @@ -330,6 +319,21 @@ func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) { } } +// Run receives updates of target groups and triggers a reload. +// The dispatching of notifications occurs in the background to prevent blocking the receipt of target updates. +// Refer to https://github.com/prometheus/prometheus/issues/13676 for more details. 
+func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) { + go n.sendLoop() + for { + select { + case <-n.ctx.Done(): + return + case ts := <-tsets: + n.reload(ts) + } + } +} + func (n *Manager) reload(tgs map[string][]*targetgroup.Group) { n.mtx.Lock() defer n.mtx.Unlock() @@ -483,6 +487,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { ams.mtx.RLock() + if len(ams.cfg.AlertRelabelConfigs) > 0 { amAlerts = relabelAlerts(ams.cfg.AlertRelabelConfigs, labels.Labels{}, alerts) if len(amAlerts) == 0 { diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go index 5c82decbe0..03290a58ca 100644 --- a/notifier/notifier_test.go +++ b/notifier/notifier_test.go @@ -701,125 +701,10 @@ func TestLabelsToOpenAPILabelSet(t *testing.T) { require.Equal(t, models.LabelSet{"aaa": "111", "bbb": "222"}, labelsToOpenAPILabelSet(labels.FromStrings("aaa", "111", "bbb", "222"))) } -// TestHangingNotifier validates that targets updates happen even when there are -// queued alerts. -func TestHangingNotifier(t *testing.T) { - // Note: When targets are not updated in time, this test is flaky because go - // selects are not deterministic. Therefore we run 10 subtests to run into the issue. - for i := 0; i < 10; i++ { - t.Run(strconv.Itoa(i), func(t *testing.T) { - var ( - done = make(chan struct{}) - changed = make(chan struct{}) - syncCh = make(chan map[string][]*targetgroup.Group) - ) - - defer func() { - close(done) - }() - - var calledOnce bool - // Setting up a bad server. This server hangs for 2 seconds. - badServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if calledOnce { - t.Fatal("hanging server called multiple times") - } - calledOnce = true - select { - case <-done: - case <-time.After(2 * time.Second): - } - })) - badURL, err := url.Parse(badServer.URL) - require.NoError(t, err) - badAddress := badURL.Host // Used for __name__ label in targets. - - // Setting up a bad server. This server returns fast, signaling requests on - // by closing the changed channel. - goodServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - close(changed) - })) - goodURL, err := url.Parse(goodServer.URL) - require.NoError(t, err) - goodAddress := goodURL.Host // Used for __name__ label in targets. - - h := NewManager( - &Options{ - QueueCapacity: 20 * maxBatchSize, - }, - nil, - ) - - h.alertmanagers = make(map[string]*alertmanagerSet) - - am1Cfg := config.DefaultAlertmanagerConfig - am1Cfg.Timeout = model.Duration(200 * time.Millisecond) - - h.alertmanagers["config-0"] = &alertmanagerSet{ - ams: []alertmanager{}, - cfg: &am1Cfg, - metrics: h.metrics, - } - go h.Run(syncCh) - defer h.Stop() - - var alerts []*Alert - for i := range make([]struct{}, 20*maxBatchSize) { - alerts = append(alerts, &Alert{ - Labels: labels.FromStrings("alertname", strconv.Itoa(i)), - }) - } - - // Injecting the hanging server URL. - syncCh <- map[string][]*targetgroup.Group{ - "config-0": { - { - Targets: []model.LabelSet{ - { - model.AddressLabel: model.LabelValue(badAddress), - }, - }, - }, - }, - } - - // Queing alerts. - h.Send(alerts...) - - // Updating with a working alertmanager target. 
- go func() { - select { - case syncCh <- map[string][]*targetgroup.Group{ - "config-0": { - { - Targets: []model.LabelSet{ - { - model.AddressLabel: model.LabelValue(goodAddress), - }, - }, - }, - }, - }: - case <-done: - } - }() - - select { - case <-time.After(1 * time.Second): - t.Fatalf("Timeout after 1 second, targets not synced in time.") - case <-changed: - // The good server has been hit in less than 3 seconds, therefore - // targets have been updated before a second call could be made to the - // bad server. - } - }) - } -} - -// TODO: renameit and even replace TestHangingNotifier with it. -// TestHangingNotifierXXX ensures that the notifier takes into account SD changes even when there are +// TestHangingNotifier ensures that the notifier takes into account SD changes even when there are // queued alerts. This test reproduces the issue described in https://github.com/prometheus/prometheus/issues/13676. -func TestHangingNotifierXXX(t *testing.T) { +// and https://github.com/prometheus/prometheus/issues/8768. +func TestHangingNotifier(t *testing.T) { const ( batches = 100 alertsCount = maxBatchSize * batches @@ -857,6 +742,8 @@ func TestHangingNotifierXXX(t *testing.T) { require.NoError(t, err) // Initialize the discovery manager + // This is relevant as the updates aren't sent continually in real life, but only each updatert. + // The old implementation of TestHangingNotifier didn't take that into acount. ctx, cancel := context.WithCancel(context.Background()) defer cancel() reg := prometheus.NewRegistry() From 70beda092a47c161b0483eb93c97547acbbdf63b Mon Sep 17 00:00:00 2001 From: machine424 Date: Mon, 10 Jun 2024 21:26:36 +0200 Subject: [PATCH 41/44] fix(notifier): take alertmanagerSet.mtx before checking alertmanagerSet.ams in sendAll Signed-off-by: machine424 --- notifier/notifier.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/notifier/notifier.go b/notifier/notifier.go index a375a0749c..eb83c45b07 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -475,10 +475,6 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { numSuccess atomic.Uint64 ) for _, ams := range amSets { - if len(ams.ams) == 0 { - continue - } - var ( payload []byte err error @@ -487,6 +483,10 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { ams.mtx.RLock() + if len(ams.ams) == 0 { + ams.mtx.RUnlock() + continue + } if len(ams.cfg.AlertRelabelConfigs) > 0 { amAlerts = relabelAlerts(ams.cfg.AlertRelabelConfigs, labels.Labels{}, alerts) From 35564c0cb094b1c1b0eca37fdb95c13489e7f8f0 Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Wed, 19 Jun 2024 17:30:49 +0200 Subject: [PATCH 42/44] Export remote.LabelsToLabelsProto() and remote.LabelProtosToLabels() Signed-off-by: Marco Pracucci --- storage/remote/codec.go | 16 +++++++++------- storage/remote/codec_test.go | 6 +++--- storage/remote/queue_manager.go | 4 ++-- storage/remote/queue_manager_test.go | 4 ++-- storage/remote/read_test.go | 6 +++--- storage/remote/write_handler.go | 2 +- storage/remote/write_handler_test.go | 4 ++-- 7 files changed, 22 insertions(+), 20 deletions(-) diff --git a/storage/remote/codec.go b/storage/remote/codec.go index c3b815a4d3..8c569ff038 100644 --- a/storage/remote/codec.go +++ b/storage/remote/codec.go @@ -166,7 +166,7 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult, } resp.Timeseries = append(resp.Timeseries, &prompb.TimeSeries{ - Labels: labelsToLabelsProto(series.Labels(), nil), + Labels: LabelsToLabelsProto(series.Labels(), nil), Samples: 
samples, Histograms: histograms, }) @@ -182,7 +182,7 @@ func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet if err := validateLabelsAndMetricName(ts.Labels); err != nil { return errSeriesSet{err: err} } - lbls := labelProtosToLabels(&b, ts.Labels) + lbls := LabelProtosToLabels(&b, ts.Labels) series = append(series, &concreteSeries{labels: lbls, floats: ts.Samples, histograms: ts.Histograms}) } @@ -235,7 +235,7 @@ func StreamChunkedReadResponses( for ss.Next() { series := ss.At() iter = series.Iterator(iter) - lbls = MergeLabels(labelsToLabelsProto(series.Labels(), lbls), sortedExternalLabels) + lbls = MergeLabels(LabelsToLabelsProto(series.Labels(), lbls), sortedExternalLabels) maxDataLength := maxBytesInFrame for _, lbl := range lbls { @@ -622,7 +622,7 @@ func exemplarProtoToExemplar(b *labels.ScratchBuilder, ep prompb.Exemplar) exemp timestamp := ep.Timestamp return exemplar.Exemplar{ - Labels: labelProtosToLabels(b, ep.Labels), + Labels: LabelProtosToLabels(b, ep.Labels), Value: ep.Value, Ts: timestamp, HasTs: timestamp != 0, @@ -762,7 +762,9 @@ func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric { return metric } -func labelProtosToLabels(b *labels.ScratchBuilder, labelPairs []prompb.Label) labels.Labels { +// LabelProtosToLabels transforms prompb labels into labels. The labels builder +// will be used to build the returned labels. +func LabelProtosToLabels(b *labels.ScratchBuilder, labelPairs []prompb.Label) labels.Labels { b.Reset() for _, l := range labelPairs { b.Add(l.Name, l.Value) @@ -771,9 +773,9 @@ func labelProtosToLabels(b *labels.ScratchBuilder, labelPairs []prompb.Label) la return b.Labels() } -// labelsToLabelsProto transforms labels into prompb labels. The buffer slice +// LabelsToLabelsProto transforms labels into prompb labels. The buffer slice // will be used to avoid allocations if it is big enough to store the labels. 
-func labelsToLabelsProto(lbls labels.Labels, buf []prompb.Label) []prompb.Label { +func LabelsToLabelsProto(lbls labels.Labels, buf []prompb.Label) []prompb.Label { result := buf[:0] lbls.Range(func(l labels.Label) { result = append(result, prompb.Label{ diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go index 41d4b3656c..c3a4cbc6dd 100644 --- a/storage/remote/codec_test.go +++ b/storage/remote/codec_test.go @@ -729,8 +729,8 @@ func TestFloatHistogramToProtoConvert(t *testing.T) { } func TestStreamResponse(t *testing.T) { - lbs1 := labelsToLabelsProto(labels.FromStrings("instance", "localhost1", "job", "demo1"), nil) - lbs2 := labelsToLabelsProto(labels.FromStrings("instance", "localhost2", "job", "demo2"), nil) + lbs1 := LabelsToLabelsProto(labels.FromStrings("instance", "localhost1", "job", "demo1"), nil) + lbs2 := LabelsToLabelsProto(labels.FromStrings("instance", "localhost2", "job", "demo2"), nil) chunk := prompb.Chunk{ Type: prompb.Chunk_XOR, Data: make([]byte, 100), @@ -802,7 +802,7 @@ func (c *mockChunkSeriesSet) Next() bool { func (c *mockChunkSeriesSet) At() storage.ChunkSeries { return &storage.ChunkSeriesEntry{ - Lset: labelProtosToLabels(&c.builder, c.chunkedSeries[c.index].Labels), + Lset: LabelProtosToLabels(&c.builder, c.chunkedSeries[c.index].Labels), ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator { return &mockChunkIterator{ chunks: c.chunkedSeries[c.index].Chunks, diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 01d2db06a5..b244b331b0 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -1507,7 +1507,7 @@ func (s *shards) populateTimeSeries(batch []timeSeries, pendingData []prompb.Tim // Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff) // retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll // stop reading from the queue. This makes it safe to reference pendingSamples by index. 
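With labelProtosToLabels and labelsToLabelsProto exported (see the codec.go change above), the same conversion becomes available to code outside the storage/remote package. A minimal sketch of such external usage — the metric name, sample value and timestamp are placeholders, and the import paths follow the repository layout shown in these diffs:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/prompb"
	"github.com/prometheus/prometheus/storage/remote"
)

func main() {
	lbls := labels.FromStrings("__name__", "up", "job", "demo")

	// Convert labels to their protobuf form; a reusable buffer can be passed
	// to avoid allocations, or nil if no buffer is available.
	var buf []prompb.Label
	ts := prompb.TimeSeries{
		Labels:  remote.LabelsToLabelsProto(lbls, buf),
		Samples: []prompb.Sample{{Value: 1, Timestamp: 1718800000000}},
	}

	// Round-trip back to labels.Labels using a ScratchBuilder.
	b := labels.NewScratchBuilder(len(ts.Labels))
	fmt.Println(remote.LabelProtosToLabels(&b, ts.Labels))
}
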
- pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels) + pendingData[nPending].Labels = LabelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels) switch d.sType { case tSample: pendingData[nPending].Samples = append(pendingData[nPending].Samples, prompb.Sample{ @@ -1517,7 +1517,7 @@ func (s *shards) populateTimeSeries(batch []timeSeries, pendingData []prompb.Tim nPendingSamples++ case tExemplar: pendingData[nPending].Exemplars = append(pendingData[nPending].Exemplars, prompb.Exemplar{ - Labels: labelsToLabelsProto(d.exemplarLabels, nil), + Labels: LabelsToLabelsProto(d.exemplarLabels, nil), Value: d.value, Timestamp: d.timestamp, }) diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index 6121fb6c03..06783167fb 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -742,7 +742,7 @@ func (c *TestWriteClient) expectExemplars(ss []record.RefExemplar, series []reco for _, s := range ss { seriesName := getSeriesNameFromRef(series[s.Ref]) e := prompb.Exemplar{ - Labels: labelsToLabelsProto(s.Labels, nil), + Labels: LabelsToLabelsProto(s.Labels, nil), Timestamp: s.T, Value: s.V, } @@ -826,7 +826,7 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error { builder := labels.NewScratchBuilder(0) count := 0 for _, ts := range reqProto.Timeseries { - labels := labelProtosToLabels(&builder, ts.Labels) + labels := LabelProtosToLabels(&builder, ts.Labels) seriesName := labels.Get("__name__") for _, sample := range ts.Samples { count++ diff --git a/storage/remote/read_test.go b/storage/remote/read_test.go index 87408dfb4f..810009af0f 100644 --- a/storage/remote/read_test.go +++ b/storage/remote/read_test.go @@ -172,12 +172,12 @@ func TestSeriesSetFilter(t *testing.T) { toRemove: []string{"foo"}, in: &prompb.QueryResult{ Timeseries: []*prompb.TimeSeries{ - {Labels: labelsToLabelsProto(labels.FromStrings("foo", "bar", "a", "b"), nil)}, + {Labels: LabelsToLabelsProto(labels.FromStrings("foo", "bar", "a", "b"), nil)}, }, }, expected: &prompb.QueryResult{ Timeseries: []*prompb.TimeSeries{ - {Labels: labelsToLabelsProto(labels.FromStrings("a", "b"), nil)}, + {Labels: LabelsToLabelsProto(labels.FromStrings("a", "b"), nil)}, }, }, }, @@ -211,7 +211,7 @@ func (c *mockedRemoteClient) Read(_ context.Context, query *prompb.Query) (*prom q := &prompb.QueryResult{} for _, s := range c.store { - l := labelProtosToLabels(&c.b, s.Labels) + l := LabelProtosToLabels(&c.b, s.Labels) var notMatch bool for _, m := range matchers { diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index ff227292b8..e7515a42b8 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -116,7 +116,7 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err b := labels.NewScratchBuilder(0) var exemplarErr error for _, ts := range req.Timeseries { - labels := labelProtosToLabels(&b, ts.Labels) + labels := LabelProtosToLabels(&b, ts.Labels) if !labels.IsValid() { level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", labels.String()) samplesWithInvalidLabels++ diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index 5125290f7c..1715e92c27 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -60,14 +60,14 @@ func TestRemoteWriteHandler(t *testing.T) { j := 0 k := 0 for _, ts := range writeRequestFixture.Timeseries { - 
labels := labelProtosToLabels(&b, ts.Labels) + labels := LabelProtosToLabels(&b, ts.Labels) for _, s := range ts.Samples { requireEqual(t, mockSample{labels, s.Timestamp, s.Value}, appendable.samples[i]) i++ } for _, e := range ts.Exemplars { - exemplarLabels := labelProtosToLabels(&b, e.Labels) + exemplarLabels := LabelProtosToLabels(&b, e.Labels) requireEqual(t, mockExemplar{labels, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j]) j++ } From f9ca6c4ae613ec997297843f32cae3f6c4f0f20a Mon Sep 17 00:00:00 2001 From: machine424 Date: Tue, 18 Jun 2024 13:38:20 +0200 Subject: [PATCH 43/44] chore: add an alert based on the metric prometheus_sd_kubernetes_failures_total that was introcued in https://github.com/prometheus/prometheus/pull/13554 The same motivation for adding the metric applies: To avoid silent SD failures, as existing logs may not be regularly checked and can be missed. Signed-off-by: machine424 Co-authored-by: Simon Pasquier --- documentation/prometheus-mixin/alerts.libsonnet | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/documentation/prometheus-mixin/alerts.libsonnet b/documentation/prometheus-mixin/alerts.libsonnet index 508d89c244..563daab801 100644 --- a/documentation/prometheus-mixin/alerts.libsonnet +++ b/documentation/prometheus-mixin/alerts.libsonnet @@ -34,6 +34,20 @@ description: 'Prometheus %(prometheusName)s has failed to refresh SD with mechanism {{$labels.mechanism}}.' % $._config, }, }, + { + alert: 'PrometheusKubernetesListWatchFailures', + expr: ||| + increase(prometheus_sd_kubernetes_failures_total{%(prometheusSelector)s}[5m]) > 0 + ||| % $._config, + 'for': '15m', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Requests in Kubernetes SD are failing.', + description: 'Kubernetes service discovery of Prometheus %(prometheusName)s is experiencing {{ printf "%%.0f" $value }} failures with LIST/WATCH requests to the Kubernetes API in the last 5 minutes.' % $._config, + }, + }, { alert: 'PrometheusNotificationQueueRunningFull', expr: ||| From 2aaf99dd0ad23266a09e8be87087fa08c89d3f3e Mon Sep 17 00:00:00 2001 From: akunszt <32456696+akunszt@users.noreply.github.com> Date: Thu, 20 Jun 2024 15:36:20 +0200 Subject: [PATCH 44/44] discovery: aws: expose Primary IPv6 addresses as label, partially fixes #7406 (#14156) * discovery: aws: expose Primary IPv6 addresses as label Add __meta_ec2_primary_ipv6_addresses label. This label contains the Primary IPv6 address for every ENI attached to the EC2 instance. It is ordered by the DeviceIndex and the missing elements (interface without Primary IPv6 address) are kept in the list. 
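As a rough illustration of that ordering rule (a sketch only; the helper name and the addresses are made up for the example, the actual change is in the diff below): the per-interface slice is grown up to the ENI's device index, so interfaces without a Primary IPv6 address keep their position as empty strings.

package main

import "fmt"

// insertPrimary records addr at the ENI's device index, padding with empty
// strings so interfaces without a primary IPv6 address keep their position.
func insertPrimary(primary []string, deviceIndex int, addr string) []string {
	for len(primary) <= deviceIndex {
		primary = append(primary, "")
	}
	primary[deviceIndex] = addr
	return primary
}

func main() {
	var primary []string
	primary = insertPrimary(primary, 0, "2600:1f18:1234::a") // device index 0
	primary = insertPrimary(primary, 2, "2600:1f18:1234::c") // device index 2; index 1 has no primary IPv6
	fmt.Printf("%q\n", primary)                              // ["2600:1f18:1234::a" "" "2600:1f18:1234::c"]
}
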
--------- Signed-off-by: Arpad Kunszt Co-authored-by: Ayoub Mrini --- discovery/aws/ec2.go | 61 ++++++++++++++++++----------- docs/configuration/configuration.md | 1 + 2 files changed, 40 insertions(+), 22 deletions(-) diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go index a6a0a82577..a44912481a 100644 --- a/discovery/aws/ec2.go +++ b/discovery/aws/ec2.go @@ -42,28 +42,29 @@ import ( ) const ( - ec2Label = model.MetaLabelPrefix + "ec2_" - ec2LabelAMI = ec2Label + "ami" - ec2LabelAZ = ec2Label + "availability_zone" - ec2LabelAZID = ec2Label + "availability_zone_id" - ec2LabelArch = ec2Label + "architecture" - ec2LabelIPv6Addresses = ec2Label + "ipv6_addresses" - ec2LabelInstanceID = ec2Label + "instance_id" - ec2LabelInstanceLifecycle = ec2Label + "instance_lifecycle" - ec2LabelInstanceState = ec2Label + "instance_state" - ec2LabelInstanceType = ec2Label + "instance_type" - ec2LabelOwnerID = ec2Label + "owner_id" - ec2LabelPlatform = ec2Label + "platform" - ec2LabelPrimarySubnetID = ec2Label + "primary_subnet_id" - ec2LabelPrivateDNS = ec2Label + "private_dns_name" - ec2LabelPrivateIP = ec2Label + "private_ip" - ec2LabelPublicDNS = ec2Label + "public_dns_name" - ec2LabelPublicIP = ec2Label + "public_ip" - ec2LabelRegion = ec2Label + "region" - ec2LabelSubnetID = ec2Label + "subnet_id" - ec2LabelTag = ec2Label + "tag_" - ec2LabelVPCID = ec2Label + "vpc_id" - ec2LabelSeparator = "," + ec2Label = model.MetaLabelPrefix + "ec2_" + ec2LabelAMI = ec2Label + "ami" + ec2LabelAZ = ec2Label + "availability_zone" + ec2LabelAZID = ec2Label + "availability_zone_id" + ec2LabelArch = ec2Label + "architecture" + ec2LabelIPv6Addresses = ec2Label + "ipv6_addresses" + ec2LabelInstanceID = ec2Label + "instance_id" + ec2LabelInstanceLifecycle = ec2Label + "instance_lifecycle" + ec2LabelInstanceState = ec2Label + "instance_state" + ec2LabelInstanceType = ec2Label + "instance_type" + ec2LabelOwnerID = ec2Label + "owner_id" + ec2LabelPlatform = ec2Label + "platform" + ec2LabelPrimaryIPv6Addresses = ec2Label + "primary_ipv6_addresses" + ec2LabelPrimarySubnetID = ec2Label + "primary_subnet_id" + ec2LabelPrivateDNS = ec2Label + "private_dns_name" + ec2LabelPrivateIP = ec2Label + "private_ip" + ec2LabelPublicDNS = ec2Label + "public_dns_name" + ec2LabelPublicIP = ec2Label + "public_ip" + ec2LabelRegion = ec2Label + "region" + ec2LabelSubnetID = ec2Label + "subnet_id" + ec2LabelTag = ec2Label + "tag_" + ec2LabelVPCID = ec2Label + "vpc_id" + ec2LabelSeparator = "," ) // DefaultEC2SDConfig is the default EC2 SD configuration. 
@@ -317,6 +318,7 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error var subnets []string var ipv6addrs []string + var primaryipv6addrs []string subnetsMap := make(map[string]struct{}) for _, eni := range inst.NetworkInterfaces { if eni.SubnetId == nil { @@ -330,6 +332,15 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error for _, ipv6addr := range eni.Ipv6Addresses { ipv6addrs = append(ipv6addrs, *ipv6addr.Ipv6Address) + if *ipv6addr.IsPrimaryIpv6 { + // we might have to extend the slice with more than one element + // that could leave empty strings in the list which is intentional + // to keep the position/device index information + for int64(len(primaryipv6addrs)) <= *eni.Attachment.DeviceIndex { + primaryipv6addrs = append(primaryipv6addrs, "") + } + primaryipv6addrs[*eni.Attachment.DeviceIndex] = *ipv6addr.Ipv6Address + } } } labels[ec2LabelSubnetID] = model.LabelValue( @@ -342,6 +353,12 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error strings.Join(ipv6addrs, ec2LabelSeparator) + ec2LabelSeparator) } + if len(primaryipv6addrs) > 0 { + labels[ec2LabelPrimaryIPv6Addresses] = model.LabelValue( + ec2LabelSeparator + + strings.Join(primaryipv6addrs, ec2LabelSeparator) + + ec2LabelSeparator) + } } for _, t := range inst.Tags { diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 5df7dae3c0..164f426ad5 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -1229,6 +1229,7 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_ec2_ipv6_addresses`: comma separated list of IPv6 addresses assigned to the instance's network interfaces, if present * `__meta_ec2_owner_id`: the ID of the AWS account that owns the EC2 instance * `__meta_ec2_platform`: the Operating System platform, set to 'windows' on Windows servers, absent otherwise +* `__meta_ec2_primary_ipv6_addresses`: comma separated list of the Primary IPv6 addresses of the instance, if present. The list is ordered based on the position of each corresponding network interface in the attachment order. * `__meta_ec2_primary_subnet_id`: the subnet ID of the primary network interface, if available * `__meta_ec2_private_dns_name`: the private DNS name of the instance, if available * `__meta_ec2_private_ip`: the private IP address of the instance, if present
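
Because the label value is rendered with a leading and trailing separator (the same convention the existing `__meta_ec2_ipv6_addresses` label uses, as seen in the code above), a consumer that wants the per-device-index list back can split on the comma and drop the two outer empty elements. A minimal sketch — the label value shown is a made-up example for an instance whose ENI at device index 1 has no primary IPv6 address:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical value of __meta_ec2_primary_ipv6_addresses.
	v := ",2600:1f18:1234::a,,2600:1f18:1234::c,"

	parts := strings.Split(v, ",")
	if len(parts) < 2 {
		return
	}
	// Drop the empty elements produced by the leading and trailing separators;
	// interior empty strings keep the device-index positions.
	addrs := parts[1 : len(parts)-1]
	fmt.Printf("%q\n", addrs) // ["2600:1f18:1234::a" "" "2600:1f18:1234::c"]
}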